diff --git a/sys/arm/allwinner/a10_ahci.c b/sys/arm/allwinner/a10_ahci.c index 74765e1a30dd..a0717a0e9dfa 100644 --- a/sys/arm/allwinner/a10_ahci.c +++ b/sys/arm/allwinner/a10_ahci.c @@ -1,423 +1,423 @@ /*- * Copyright (c) 2015 Luiz Otavio O Souza All rights reserved. * Copyright (c) 2014-2015 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The magic-bit-bang sequence used in this code may be based on a linux * platform driver in the Allwinner SDK from Allwinner Technology Co., Ltd. * www.allwinnertech.com, by Daniel Wang * though none of the original code was copied. */ #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include -#include +#include #include /* * Allwinner a1x/a2x/a8x SATA attachment. 
This is just the AHCI register * set with a few extra implementation-specific registers that need to * be accounted for. There's only one PHY in the system, and it needs * to be trained to bring the link up. In addition, there's some DMA * specific things that need to be done as well. These things are also * just about completely undocumented, except in ugly code in the Linux * SDK Allwinner releases. */ /* BITx -- Unknown bit that needs to be set/cleared at position x */ /* UFx -- Uknown multi-bit field frobbed during init */ #define AHCI_BISTAFR 0x00A0 #define AHCI_BISTCR 0x00A4 #define AHCI_BISTFCTR 0x00A8 #define AHCI_BISTSR 0x00AC #define AHCI_BISTDECR 0x00B0 #define AHCI_DIAGNR 0x00B4 #define AHCI_DIAGNR1 0x00B8 #define AHCI_OOBR 0x00BC #define AHCI_PHYCS0R 0x00C0 /* Bits 0..17 are a mystery */ #define PHYCS0R_BIT18 (1 << 18) #define PHYCS0R_POWER_ENABLE (1 << 19) #define PHYCS0R_UF1_MASK (7 << 20) /* Unknown Field 1 */ #define PHYCS0R_UF1_INIT (3 << 20) #define PHYCS0R_BIT23 (1 << 23) #define PHYCS0R_UF2_MASK (7 << 24) /* Uknown Field 2 */ #define PHYCS0R_UF2_INIT (5 << 24) /* Bit 27 mystery */ #define PHYCS0R_POWER_STATUS_MASK (7 << 28) #define PHYCS0R_PS_GOOD (2 << 28) /* Bit 31 mystery */ #define AHCI_PHYCS1R 0x00C4 /* Bits 0..5 are a mystery */ #define PHYCS1R_UF1_MASK (3 << 6) #define PHYCS1R_UF1_INIT (2 << 6) #define PHYCS1R_UF2_MASK (0x1f << 8) #define PHYCS1R_UF2_INIT (6 << 8) /* Bits 13..14 are a mystery */ #define PHYCS1R_BIT15 (1 << 15) #define PHYCS1R_UF3_MASK (3 << 16) #define PHYCS1R_UF3_INIT (2 << 16) /* Bit 18 mystery */ #define PHYCS1R_HIGHZ (1 << 19) /* Bits 20..27 mystery */ #define PHYCS1R_BIT28 (1 << 28) /* Bits 29..31 mystery */ #define AHCI_PHYCS2R 0x00C8 /* bits 0..4 mystery */ #define PHYCS2R_UF1_MASK (0x1f << 5) #define PHYCS2R_UF1_INIT (0x19 << 5) /* Bits 10..23 mystery */ #define PHYCS2R_CALIBRATE (1 << 24) /* Bits 25..31 mystery */ #define AHCI_TIMER1MS 0x00E0 #define AHCI_GPARAM1R 0x00E8 #define AHCI_GPARAM2R 0x00EC #define 
AHCI_PPARAMR 0x00F0 #define AHCI_TESTR 0x00F4 #define AHCI_VERSIONR 0x00F8 #define AHCI_IDR 0x00FC #define AHCI_RWCR 0x00FC #define AHCI_P0DMACR 0x0070 #define AHCI_P0PHYCR 0x0078 #define AHCI_P0PHYSR 0x007C #define PLL_FREQ 100000000 struct ahci_a10_softc { struct ahci_controller ahci_ctlr; regulator_t ahci_reg; clk_t clk_pll; clk_t clk_gate; }; static void inline ahci_set(struct resource *m, bus_size_t off, uint32_t set) { uint32_t val = ATA_INL(m, off); val |= set; ATA_OUTL(m, off, val); } static void inline ahci_clr(struct resource *m, bus_size_t off, uint32_t clr) { uint32_t val = ATA_INL(m, off); val &= ~clr; ATA_OUTL(m, off, val); } static void inline ahci_mask_set(struct resource *m, bus_size_t off, uint32_t mask, uint32_t set) { uint32_t val = ATA_INL(m, off); val &= mask; val |= set; ATA_OUTL(m, off, val); } /* * Should this be phy_reset or phy_init */ #define PHY_RESET_TIMEOUT 1000 static void ahci_a10_phy_reset(device_t dev) { uint32_t to, val; struct ahci_controller *ctlr = device_get_softc(dev); /* * Here starts the magic -- most of the comments are based * on guesswork, names of routines and printf error * messages. The code works, but it will do that even if the * comments are 100% BS. */ /* * Lock out other access while we initialize. Or at least that * seems to be the case based on Linux SDK #defines. Maybe this * put things into reset? */ ATA_OUTL(ctlr->r_mem, AHCI_RWCR, 0); DELAY(100); /* * Set bit 19 in PHYCS1R. Guessing this disables driving the PHY * port for a bit while we reset things. */ ahci_set(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_HIGHZ); /* * Frob PHYCS0R... */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS0R, ~PHYCS0R_UF2_MASK, PHYCS0R_UF2_INIT | PHYCS0R_BIT23 | PHYCS0R_BIT18); /* * Set three fields in PHYCS1R */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS1R, ~(PHYCS1R_UF1_MASK | PHYCS1R_UF2_MASK | PHYCS1R_UF3_MASK), PHYCS1R_UF1_INIT | PHYCS1R_UF2_INIT | PHYCS1R_UF3_INIT); /* * Two more mystery bits in PHYCS1R. -- can these be combined above? 
*/
	ahci_set(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_BIT15 | PHYCS1R_BIT28);

	/*
	 * Now clear that first mystery bit. Perhaps this starts
	 * driving the PHY again so we can power it up and start
	 * talking to the SATA drive, if any below.
	 */
	ahci_clr(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_HIGHZ);

	/*
	 * Frob PHYCS0R again...
	 */
	ahci_mask_set(ctlr->r_mem, AHCI_PHYCS0R, ~PHYCS0R_UF1_MASK,
	    PHYCS0R_UF1_INIT);

	/*
	 * Frob PHYCS2R, because 25 means something?
	 */
	ahci_mask_set(ctlr->r_mem, AHCI_PHYCS2R, ~PHYCS2R_UF1_MASK,
	    PHYCS2R_UF1_INIT);

	DELAY(100);	/* WAG -- settle time before powering the PHY up */

	/*
	 * Turn on the power to the PHY and wait for it to report back
	 * good?  Polls PHYCS0R until the 3-bit power-status field reads
	 * "good" or PHY_RESET_TIMEOUT iterations (10us apart) elapse.
	 */
	ahci_set(ctlr->r_mem, AHCI_PHYCS0R, PHYCS0R_POWER_ENABLE);
	for (to = PHY_RESET_TIMEOUT; to > 0; to--) {
		val = ATA_INL(ctlr->r_mem, AHCI_PHYCS0R);
		if ((val & PHYCS0R_POWER_STATUS_MASK) == PHYCS0R_PS_GOOD)
			break;
		DELAY(10);
	}
	if (to == 0 && bootverbose)
		device_printf(dev, "PHY Power Failed PHYCS0R = %#x\n", val);

	/*
	 * Calibrate the clocks between the device and the host. This appears
	 * to be an automated process that clears the bit when it is done.
	 */
	ahci_set(ctlr->r_mem, AHCI_PHYCS2R, PHYCS2R_CALIBRATE);
	for (to = PHY_RESET_TIMEOUT; to > 0; to--) {
		val = ATA_INL(ctlr->r_mem, AHCI_PHYCS2R);
		if ((val & PHYCS2R_CALIBRATE) == 0)
			break;
		DELAY(10);
	}
	if (to == 0 && bootverbose)
		device_printf(dev, "PHY Cal Failed PHYCS2R %#x\n", val);

	/*
	 * OK, let things settle down a bit.
	 */
	DELAY(1000);

	/*
	 * Go back into normal mode now that we've calibrated the PHY.
	 * (Writing 0 to RWCR at the top of this routine locked access out;
	 * 7 presumably re-enables it -- meaning taken from the Linux SDK.)
	 */
	ATA_OUTL(ctlr->r_mem, AHCI_RWCR, 7);
}

/*
 * Per-channel start hook: program the implementation-specific P0DMACR
 * register before command processing begins on this channel.
 */
static void
ahci_a10_ch_start(struct ahci_channel *ch)
{
	uint32_t reg;

	/*
	 * Magical values from Allwinner SDK, setup the DMA before start
	 * operations on this channel.
*/
	/* Replace bits 8..15 of P0DMACR with 0x44 -- NOTE(review):
	 * exact field meaning undocumented; value comes from the SDK. */
	reg = ATA_INL(ch->r_mem, AHCI_P0DMACR);
	reg &= ~0xff00;
	reg |= 0x4400;
	ATA_OUTL(ch->r_mem, AHCI_P0DMACR, reg);
}

/*
 * Controller reset: bring the (single) SATA PHY up first, then run the
 * generic AHCI controller reset.
 */
static int
ahci_a10_ctlr_reset(device_t dev)
{

	ahci_a10_phy_reset(dev);

	return (ahci_ctlr_reset(dev));
}

static int
ahci_a10_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-ahci"))
		return (ENXIO);
	device_set_desc(dev, "Allwinner Integrated AHCI controller");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: map registers, enable the optional target-supply regulator,
 * gate/PLL clocks, reset the controller+PHY, then hand off to the
 * generic ahci_attach().  On failure everything acquired here is
 * released via the fail: path.
 */
static int
ahci_a10_attach(device_t dev)
{
	int error;
	struct ahci_a10_softc *sc;
	struct ahci_controller *ctlr;

	sc = device_get_softc(dev);
	ctlr = &sc->ahci_ctlr;

	ctlr->quirks = AHCI_Q_NOPMP;
	ctlr->vendorid = 0;
	ctlr->deviceid = 0;
	ctlr->subvendorid = 0;
	ctlr->subdeviceid = 0;
	ctlr->r_rid = 0;
	if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ctlr->r_rid, RF_ACTIVE)))
		return (ENXIO);

	/* Enable the (optional) regulator */
	if (regulator_get_by_ofw_property(dev, 0, "target-supply",
	    &sc->ahci_reg) == 0) {
		error = regulator_enable(sc->ahci_reg);
		if (error != 0) {
			device_printf(dev, "Could not enable regulator\n");
			goto fail;
		}
	}

	/* Enable clocks */
	error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_gate);
	if (error != 0) {
		device_printf(dev, "Cannot get gate clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_index(dev, 0, 1, &sc->clk_pll);
	if (error != 0) {
		device_printf(dev, "Cannot get PLL clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->clk_pll, PLL_FREQ, CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "Cannot set PLL frequency\n");
		goto fail;
	}
	error = clk_enable(sc->clk_pll);
	if (error != 0) {
		device_printf(dev, "Cannot enable PLL\n");
		goto fail;
	}
	error = clk_enable(sc->clk_gate);
	if (error != 0) {
		device_printf(dev, "Cannot enable clk gate\n");
		goto fail;
	}

	/* Reset controller */
	if ((error = ahci_a10_ctlr_reset(dev)) != 0)
		goto fail;

	/*
	 * No MSI registers on this platform.
	 */
	ctlr->msi = 0;
	ctlr->numirqs = 1;

	/* Channel start callback().
*/
	ctlr->ch_start = ahci_a10_ch_start;

	/*
	 * Note: ahci_attach will release ctlr->r_mem on errors automatically
	 */
	return (ahci_attach(dev));

fail:
	/* Undo everything acquired above, in reverse order. */
	if (sc->ahci_reg != NULL)
		regulator_disable(sc->ahci_reg);
	if (sc->clk_gate != NULL)
		clk_release(sc->clk_gate);
	if (sc->clk_pll != NULL)
		clk_release(sc->clk_pll);
	bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
	return (error);
}

/*
 * Detach: release regulator, clocks and register window, then let the
 * generic ahci_detach() tear down the controller state.
 */
static int
ahci_a10_detach(device_t dev)
{
	struct ahci_a10_softc *sc;
	struct ahci_controller *ctlr;

	sc = device_get_softc(dev);
	ctlr = &sc->ahci_ctlr;

	if (sc->ahci_reg != NULL)
		regulator_disable(sc->ahci_reg);
	if (sc->clk_gate != NULL)
		clk_release(sc->clk_gate);
	if (sc->clk_pll != NULL)
		clk_release(sc->clk_pll);
	bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
	return (ahci_detach(dev));
}

static device_method_t ahci_ata_methods[] = {
	DEVMETHOD(device_probe,		ahci_a10_probe),
	DEVMETHOD(device_attach,	ahci_a10_attach),
	DEVMETHOD(device_detach,	ahci_a10_detach),
	DEVMETHOD(bus_print_child,	ahci_print_child),
	DEVMETHOD(bus_alloc_resource,	ahci_alloc_resource),
	DEVMETHOD(bus_release_resource,	ahci_release_resource),
	DEVMETHOD(bus_setup_intr,	ahci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	ahci_teardown_intr),
	DEVMETHOD(bus_child_location,	ahci_child_location),
	DEVMETHOD_END
};

static driver_t ahci_ata_driver = {
	"ahci",
	ahci_ata_methods,
	sizeof(struct ahci_a10_softc)
};

DRIVER_MODULE(a10_ahci, simplebus, ahci_ata_driver, 0, 0);
diff --git a/sys/arm/allwinner/a10_codec.c b/sys/arm/allwinner/a10_codec.c
index 711d2203f9a5..fc4937351f3b 100644
--- a/sys/arm/allwinner/a10_codec.c
+++ b/sys/arm/allwinner/a10_codec.c
@@ -1,1206 +1,1206 @@
/*-
 * Copyright (c) 2014-2016 Jared D. McNeill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Allwinner A10/A20 and H3 Audio Codec */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "sunxi_dma_if.h" #include "mixer_if.h" struct a10codec_info; struct a10codec_config { /* mixer class */ struct kobj_class *mixer_class; /* toggle DAC/ADC mute */ void (*mute)(struct a10codec_info *, int, int); /* DRQ types */ u_int drqtype_codec; u_int drqtype_sdram; /* register map */ bus_size_t DPC, DAC_FIFOC, DAC_FIFOS, DAC_TXDATA, ADC_FIFOC, ADC_FIFOS, ADC_RXDATA, DAC_CNT, ADC_CNT; }; #define TX_TRIG_LEVEL 0xf #define RX_TRIG_LEVEL 0x7 #define DRQ_CLR_CNT 0x3 #define AC_DAC_DPC(_sc) ((_sc)->cfg->DPC) #define DAC_DPC_EN_DA 0x80000000 #define AC_DAC_FIFOC(_sc) ((_sc)->cfg->DAC_FIFOC) #define DAC_FIFOC_FS_SHIFT 29 #define DAC_FIFOC_FS_MASK (7U << DAC_FIFOC_FS_SHIFT) #define DAC_FS_48KHZ 0 #define DAC_FS_32KHZ 1 #define DAC_FS_24KHZ 2 #define DAC_FS_16KHZ 3 #define DAC_FS_12KHZ 4 #define DAC_FS_8KHZ 5 #define DAC_FS_192KHZ 6 #define DAC_FS_96KHZ 7 #define DAC_FIFOC_FIFO_MODE_SHIFT 24 #define DAC_FIFOC_FIFO_MODE_MASK (3U << DAC_FIFOC_FIFO_MODE_SHIFT) #define FIFO_MODE_24_31_8 0 #define FIFO_MODE_16_31_16 0 #define FIFO_MODE_16_15_0 1 #define DAC_FIFOC_DRQ_CLR_CNT_SHIFT 21 #define DAC_FIFOC_DRQ_CLR_CNT_MASK (3U << DAC_FIFOC_DRQ_CLR_CNT_SHIFT) #define DAC_FIFOC_TX_TRIG_LEVEL_SHIFT 8 #define DAC_FIFOC_TX_TRIG_LEVEL_MASK (0x7f << DAC_FIFOC_TX_TRIG_LEVEL_SHIFT) #define DAC_FIFOC_MONO_EN (1U << 6) #define DAC_FIFOC_TX_BITS (1U << 5) #define DAC_FIFOC_DRQ_EN (1U << 4) #define DAC_FIFOC_FIFO_FLUSH (1U << 0) #define AC_DAC_FIFOS(_sc) ((_sc)->cfg->DAC_FIFOS) #define AC_DAC_TXDATA(_sc) ((_sc)->cfg->DAC_TXDATA) #define AC_ADC_FIFOC(_sc) ((_sc)->cfg->ADC_FIFOC) #define ADC_FIFOC_FS_SHIFT 29 #define ADC_FIFOC_FS_MASK (7U << ADC_FIFOC_FS_SHIFT) #define ADC_FS_48KHZ 0 #define ADC_FIFOC_EN_AD (1U << 28) #define ADC_FIFOC_RX_FIFO_MODE (1U << 24) #define 
ADC_FIFOC_RX_TRIG_LEVEL_SHIFT 8 #define ADC_FIFOC_RX_TRIG_LEVEL_MASK (0x1f << ADC_FIFOC_RX_TRIG_LEVEL_SHIFT) #define ADC_FIFOC_MONO_EN (1U << 7) #define ADC_FIFOC_RX_BITS (1U << 6) #define ADC_FIFOC_DRQ_EN (1U << 4) #define ADC_FIFOC_FIFO_FLUSH (1U << 1) #define AC_ADC_FIFOS(_sc) ((_sc)->cfg->ADC_FIFOS) #define AC_ADC_RXDATA(_sc) ((_sc)->cfg->ADC_RXDATA) #define AC_DAC_CNT(_sc) ((_sc)->cfg->DAC_CNT) #define AC_ADC_CNT(_sc) ((_sc)->cfg->ADC_CNT) static uint32_t a10codec_fmt[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps a10codec_pcaps = { 8000, 192000, a10codec_fmt, 0 }; static struct pcmchan_caps a10codec_rcaps = { 8000, 48000, a10codec_fmt, 0 }; struct a10codec_info; struct a10codec_chinfo { struct snd_dbuf *buffer; struct pcm_channel *channel; struct a10codec_info *parent; bus_dmamap_t dmamap; void *dmaaddr; bus_addr_t physaddr; bus_size_t fifo; device_t dmac; void *dmachan; int dir; int run; uint32_t pos; uint32_t format; uint32_t blocksize; uint32_t speed; }; struct a10codec_info { device_t dev; struct resource *res[2]; struct mtx *lock; bus_dma_tag_t dmat; unsigned dmasize; void *ih; struct a10codec_config *cfg; struct a10codec_chinfo play; struct a10codec_chinfo rec; }; static struct resource_spec a10codec_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; #define CODEC_ANALOG_READ(sc, reg) bus_read_4((sc)->res[1], (reg)) #define CODEC_ANALOG_WRITE(sc, reg, val) bus_write_4((sc)->res[1], (reg), (val)) #define CODEC_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define CODEC_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) /* * A10/A20 mixer interface */ #define A10_DAC_ACTL 0x10 #define A10_DACAREN (1U << 31) #define A10_DACALEN (1U << 30) #define A10_MIXEN (1U << 29) #define A10_DACPAS (1U << 8) #define A10_PAMUTE (1U << 6) #define A10_PAVOL_SHIFT 0 #define A10_PAVOL_MASK (0x3f << A10_PAVOL_SHIFT) #define A10_ADC_ACTL 0x28 #define A10_ADCREN (1U << 31) #define A10_ADCLEN (1U << 30) 
#define A10_PREG1EN (1U << 29) #define A10_PREG2EN (1U << 28) #define A10_VMICEN (1U << 27) #define A10_ADCG_SHIFT 20 #define A10_ADCG_MASK (7U << A10_ADCG_SHIFT) #define A10_ADCIS_SHIFT 17 #define A10_ADCIS_MASK (7U << A10_ADCIS_SHIFT) #define A10_ADC_IS_LINEIN 0 #define A10_ADC_IS_FMIN 1 #define A10_ADC_IS_MIC1 2 #define A10_ADC_IS_MIC2 3 #define A10_ADC_IS_MIC1_L_MIC2_R 4 #define A10_ADC_IS_MIC1_LR_MIC2_LR 5 #define A10_ADC_IS_OMIX 6 #define A10_ADC_IS_LINEIN_L_MIC1_R 7 #define A10_LNRDF (1U << 16) #define A10_LNPREG_SHIFT 13 #define A10_LNPREG_MASK (7U << A10_LNPREG_SHIFT) #define A10_PA_EN (1U << 4) #define A10_DDE (1U << 3) static int a10_mixer_init(struct snd_mixer *m) { struct a10codec_info *sc = mix_getdevinfo(m); uint32_t val; mix_setdevs(m, SOUND_MASK_VOLUME | SOUND_MASK_LINE | SOUND_MASK_RECLEV); mix_setrecdevs(m, SOUND_MASK_LINE | SOUND_MASK_LINE1 | SOUND_MASK_MIC); /* Unmute input source to PA */ val = CODEC_READ(sc, A10_DAC_ACTL); val |= A10_PAMUTE; CODEC_WRITE(sc, A10_DAC_ACTL, val); /* Enable PA */ val = CODEC_READ(sc, A10_ADC_ACTL); val |= A10_PA_EN; CODEC_WRITE(sc, A10_ADC_ACTL, val); return (0); } static const struct a10_mixer { unsigned reg; unsigned mask; unsigned shift; } a10_mixers[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_VOLUME] = { A10_DAC_ACTL, A10_PAVOL_MASK, A10_PAVOL_SHIFT }, [SOUND_MIXER_LINE] = { A10_ADC_ACTL, A10_LNPREG_MASK, A10_LNPREG_SHIFT }, [SOUND_MIXER_RECLEV] = { A10_ADC_ACTL, A10_ADCG_MASK, A10_ADCG_SHIFT }, }; static int a10_mixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct a10codec_info *sc = mix_getdevinfo(m); uint32_t val; unsigned nvol, max; max = a10_mixers[dev].mask >> a10_mixers[dev].shift; nvol = (left * max) / 100; val = CODEC_READ(sc, a10_mixers[dev].reg); val &= ~a10_mixers[dev].mask; val |= (nvol << a10_mixers[dev].shift); CODEC_WRITE(sc, a10_mixers[dev].reg, val); left = right = (left * 100) / max; return (left | (right << 8)); } static uint32_t a10_mixer_setrecsrc(struct 
snd_mixer *m, uint32_t src) { struct a10codec_info *sc = mix_getdevinfo(m); uint32_t val; val = CODEC_READ(sc, A10_ADC_ACTL); switch (src) { case SOUND_MASK_LINE: /* line-in */ val &= ~A10_ADCIS_MASK; val |= (A10_ADC_IS_LINEIN << A10_ADCIS_SHIFT); break; case SOUND_MASK_MIC: /* MIC1 */ val &= ~A10_ADCIS_MASK; val |= (A10_ADC_IS_MIC1 << A10_ADCIS_SHIFT); break; case SOUND_MASK_LINE1: /* MIC2 */ val &= ~A10_ADCIS_MASK; val |= (A10_ADC_IS_MIC2 << A10_ADCIS_SHIFT); break; default: break; } CODEC_WRITE(sc, A10_ADC_ACTL, val); switch ((val & A10_ADCIS_MASK) >> A10_ADCIS_SHIFT) { case A10_ADC_IS_LINEIN: return (SOUND_MASK_LINE); case A10_ADC_IS_MIC1: return (SOUND_MASK_MIC); case A10_ADC_IS_MIC2: return (SOUND_MASK_LINE1); default: return (0); } } static void a10_mute(struct a10codec_info *sc, int mute, int dir) { uint32_t val; if (dir == PCMDIR_PLAY) { val = CODEC_READ(sc, A10_DAC_ACTL); if (mute) { /* Disable DAC analog l/r channels and output mixer */ val &= ~A10_DACAREN; val &= ~A10_DACALEN; val &= ~A10_DACPAS; } else { /* Enable DAC analog l/r channels and output mixer */ val |= A10_DACAREN; val |= A10_DACALEN; val |= A10_DACPAS; } CODEC_WRITE(sc, A10_DAC_ACTL, val); } else { val = CODEC_READ(sc, A10_ADC_ACTL); if (mute) { /* Disable ADC analog l/r channels, MIC1 preamp, * and VMIC pin voltage */ val &= ~A10_ADCREN; val &= ~A10_ADCLEN; val &= ~A10_PREG1EN; val &= ~A10_VMICEN; } else { /* Enable ADC analog l/r channels, MIC1 preamp, * and VMIC pin voltage */ val |= A10_ADCREN; val |= A10_ADCLEN; val |= A10_PREG1EN; val |= A10_VMICEN; } CODEC_WRITE(sc, A10_ADC_ACTL, val); } } static kobj_method_t a10_mixer_methods[] = { KOBJMETHOD(mixer_init, a10_mixer_init), KOBJMETHOD(mixer_set, a10_mixer_set), KOBJMETHOD(mixer_setrecsrc, a10_mixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(a10_mixer); /* * H3 mixer interface */ #define H3_PR_CFG 0x00 #define H3_AC_PR_RST (1 << 28) #define H3_AC_PR_RW (1 << 24) #define H3_AC_PR_ADDR_SHIFT 16 #define H3_AC_PR_ADDR_MASK (0x1f << 
H3_AC_PR_ADDR_SHIFT) #define H3_ACDA_PR_WDAT_SHIFT 8 #define H3_ACDA_PR_WDAT_MASK (0xff << H3_ACDA_PR_WDAT_SHIFT) #define H3_ACDA_PR_RDAT_SHIFT 0 #define H3_ACDA_PR_RDAT_MASK (0xff << H3_ACDA_PR_RDAT_SHIFT) #define H3_LOMIXSC 0x01 #define H3_LOMIXSC_LDAC (1 << 1) #define H3_ROMIXSC 0x02 #define H3_ROMIXSC_RDAC (1 << 1) #define H3_DAC_PA_SRC 0x03 #define H3_DACAREN (1 << 7) #define H3_DACALEN (1 << 6) #define H3_RMIXEN (1 << 5) #define H3_LMIXEN (1 << 4) #define H3_LINEIN_GCTR 0x05 #define H3_LINEING_SHIFT 4 #define H3_LINEING_MASK (0x7 << H3_LINEING_SHIFT) #define H3_MIC_GCTR 0x06 #define H3_MIC1_GAIN_SHIFT 4 #define H3_MIC1_GAIN_MASK (0x7 << H3_MIC1_GAIN_SHIFT) #define H3_MIC2_GAIN_SHIFT 0 #define H3_MIC2_GAIN_MASK (0x7 << H3_MIC2_GAIN_SHIFT) #define H3_PAEN_CTR 0x07 #define H3_LINEOUTEN (1 << 7) #define H3_LINEOUT_VOLC 0x09 #define H3_LINEOUTVOL_SHIFT 3 #define H3_LINEOUTVOL_MASK (0x1f << H3_LINEOUTVOL_SHIFT) #define H3_MIC2G_LINEOUT_CTR 0x0a #define H3_LINEOUT_LSEL (1 << 3) #define H3_LINEOUT_RSEL (1 << 2) #define H3_LADCMIXSC 0x0c #define H3_RADCMIXSC 0x0d #define H3_ADCMIXSC_MIC1 (1 << 6) #define H3_ADCMIXSC_MIC2 (1 << 5) #define H3_ADCMIXSC_LINEIN (1 << 2) #define H3_ADCMIXSC_OMIXER (3 << 0) #define H3_ADC_AP_EN 0x0f #define H3_ADCREN (1 << 7) #define H3_ADCLEN (1 << 6) #define H3_ADCG_SHIFT 0 #define H3_ADCG_MASK (0x7 << H3_ADCG_SHIFT) static u_int h3_pr_read(struct a10codec_info *sc, u_int addr) { uint32_t val; /* Read current value */ val = CODEC_ANALOG_READ(sc, H3_PR_CFG); /* De-assert reset */ val |= H3_AC_PR_RST; CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Read mode */ val &= ~H3_AC_PR_RW; CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Set address */ val &= ~H3_AC_PR_ADDR_MASK; val |= (addr << H3_AC_PR_ADDR_SHIFT); CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Read data */ return (CODEC_ANALOG_READ(sc , H3_PR_CFG) & H3_ACDA_PR_RDAT_MASK); } static void h3_pr_write(struct a10codec_info *sc, u_int addr, u_int data) { uint32_t val; /* Read current value */ val = 
CODEC_ANALOG_READ(sc, H3_PR_CFG); /* De-assert reset */ val |= H3_AC_PR_RST; CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Set address */ val &= ~H3_AC_PR_ADDR_MASK; val |= (addr << H3_AC_PR_ADDR_SHIFT); CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Write data */ val &= ~H3_ACDA_PR_WDAT_MASK; val |= (data << H3_ACDA_PR_WDAT_SHIFT); CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); /* Write mode */ val |= H3_AC_PR_RW; CODEC_ANALOG_WRITE(sc, H3_PR_CFG, val); } static void h3_pr_set_clear(struct a10codec_info *sc, u_int addr, u_int set, u_int clr) { u_int old, new; old = h3_pr_read(sc, addr); new = set | (old & ~clr); h3_pr_write(sc, addr, new); } static int h3_mixer_init(struct snd_mixer *m) { int rid=1; pcell_t reg[2]; phandle_t analogref; struct a10codec_info *sc = mix_getdevinfo(m); if (OF_getencprop(ofw_bus_get_node(sc->dev), "allwinner,codec-analog-controls", &analogref, sizeof(analogref)) <= 0) { return (ENXIO); } if (OF_getencprop(OF_node_from_xref(analogref), "reg", reg, sizeof(reg)) <= 0) { return (ENXIO); } sc->res[1] = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, &rid, reg[0], reg[0]+reg[1], reg[1], RF_ACTIVE ); if (sc->res[1] == NULL) { return (ENXIO); } mix_setdevs(m, SOUND_MASK_PCM | SOUND_MASK_VOLUME | SOUND_MASK_RECLEV | SOUND_MASK_MIC | SOUND_MASK_LINE | SOUND_MASK_LINE1); mix_setrecdevs(m, SOUND_MASK_MIC | SOUND_MASK_LINE | SOUND_MASK_LINE1 | SOUND_MASK_IMIX); pcm_setflags(sc->dev, pcm_getflags(sc->dev) | SD_F_SOFTPCMVOL); /* Right & Left LINEOUT enable */ h3_pr_set_clear(sc, H3_PAEN_CTR, H3_LINEOUTEN, 0); h3_pr_set_clear(sc, H3_MIC2G_LINEOUT_CTR, H3_LINEOUT_LSEL | H3_LINEOUT_RSEL, 0); return (0); } static const struct h3_mixer { unsigned reg; unsigned mask; unsigned shift; } h3_mixers[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_VOLUME] = { H3_LINEOUT_VOLC, H3_LINEOUTVOL_MASK, H3_LINEOUTVOL_SHIFT }, [SOUND_MIXER_RECLEV] = { H3_ADC_AP_EN, H3_ADCG_MASK, H3_ADCG_SHIFT }, [SOUND_MIXER_LINE] = { H3_LINEIN_GCTR, H3_LINEING_MASK, H3_LINEING_SHIFT }, [SOUND_MIXER_MIC] = { 
H3_MIC_GCTR, H3_MIC1_GAIN_MASK, H3_MIC1_GAIN_SHIFT }, [SOUND_MIXER_LINE1] = { H3_MIC_GCTR, H3_MIC2_GAIN_MASK, H3_MIC2_GAIN_SHIFT }, }; static int h3_mixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct a10codec_info *sc = mix_getdevinfo(m); unsigned nvol, max; max = h3_mixers[dev].mask >> h3_mixers[dev].shift; nvol = (left * max) / 100; h3_pr_set_clear(sc, h3_mixers[dev].reg, nvol << h3_mixers[dev].shift, h3_mixers[dev].mask); left = right = (left * 100) / max; return (left | (right << 8)); } static uint32_t h3_mixer_setrecsrc(struct snd_mixer *m, uint32_t src) { struct a10codec_info *sc = mix_getdevinfo(m); uint32_t val; val = 0; src &= (SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_LINE1 | SOUND_MASK_IMIX); if ((src & SOUND_MASK_LINE) != 0) /* line-in */ val |= H3_ADCMIXSC_LINEIN; if ((src & SOUND_MASK_MIC) != 0) /* MIC1 */ val |= H3_ADCMIXSC_MIC1; if ((src & SOUND_MASK_LINE1) != 0) /* MIC2 */ val |= H3_ADCMIXSC_MIC2; if ((src & SOUND_MASK_IMIX) != 0) /* l/r output mixer */ val |= H3_ADCMIXSC_OMIXER; h3_pr_write(sc, H3_LADCMIXSC, val); h3_pr_write(sc, H3_RADCMIXSC, val); return (src); } static void h3_mute(struct a10codec_info *sc, int mute, int dir) { if (dir == PCMDIR_PLAY) { if (mute) { /* Mute DAC l/r channels to output mixer */ h3_pr_set_clear(sc, H3_LOMIXSC, 0, H3_LOMIXSC_LDAC); h3_pr_set_clear(sc, H3_ROMIXSC, 0, H3_ROMIXSC_RDAC); /* Disable DAC analog l/r channels and output mixer */ h3_pr_set_clear(sc, H3_DAC_PA_SRC, 0, H3_DACAREN | H3_DACALEN | H3_RMIXEN | H3_LMIXEN); } else { /* Enable DAC analog l/r channels and output mixer */ h3_pr_set_clear(sc, H3_DAC_PA_SRC, H3_DACAREN | H3_DACALEN | H3_RMIXEN | H3_LMIXEN, 0); /* Unmute DAC l/r channels to output mixer */ h3_pr_set_clear(sc, H3_LOMIXSC, H3_LOMIXSC_LDAC, 0); h3_pr_set_clear(sc, H3_ROMIXSC, H3_ROMIXSC_RDAC, 0); } } else { if (mute) { /* Disable ADC analog l/r channels */ h3_pr_set_clear(sc, H3_ADC_AP_EN, 0, H3_ADCREN | H3_ADCLEN); } else { /* Enable ADC analog l/r 
channels */ h3_pr_set_clear(sc, H3_ADC_AP_EN, H3_ADCREN | H3_ADCLEN, 0); } } } static kobj_method_t h3_mixer_methods[] = { KOBJMETHOD(mixer_init, h3_mixer_init), KOBJMETHOD(mixer_set, h3_mixer_set), KOBJMETHOD(mixer_setrecsrc, h3_mixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(h3_mixer); /* * Channel interface */ static void a10codec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct a10codec_chinfo *ch = arg; if (error != 0) return; ch->physaddr = segs[0].ds_addr; } static void a10codec_transfer(struct a10codec_chinfo *ch) { bus_addr_t src, dst; int error; if (ch->dir == PCMDIR_PLAY) { src = ch->physaddr + ch->pos; dst = ch->fifo; } else { src = ch->fifo; dst = ch->physaddr + ch->pos; } error = SUNXI_DMA_TRANSFER(ch->dmac, ch->dmachan, src, dst, ch->blocksize); if (error) { ch->run = 0; device_printf(ch->parent->dev, "DMA transfer failed: %d\n", error); } } static void a10codec_dmaconfig(struct a10codec_chinfo *ch) { struct a10codec_info *sc = ch->parent; struct sunxi_dma_config conf; memset(&conf, 0, sizeof(conf)); conf.src_width = conf.dst_width = 16; conf.src_burst_len = conf.dst_burst_len = 4; if (ch->dir == PCMDIR_PLAY) { conf.dst_noincr = true; conf.src_drqtype = sc->cfg->drqtype_sdram; conf.dst_drqtype = sc->cfg->drqtype_codec; } else { conf.src_noincr = true; conf.src_drqtype = sc->cfg->drqtype_codec; conf.dst_drqtype = sc->cfg->drqtype_sdram; } SUNXI_DMA_SET_CONFIG(ch->dmac, ch->dmachan, &conf); } static void a10codec_dmaintr(void *priv) { struct a10codec_chinfo *ch = priv; unsigned bufsize; bufsize = sndbuf_getsize(ch->buffer); ch->pos += ch->blocksize; if (ch->pos >= bufsize) ch->pos -= bufsize; if (ch->run) { chn_intr(ch->channel); a10codec_transfer(ch); } } static unsigned a10codec_fs(struct a10codec_chinfo *ch) { switch (ch->speed) { case 48000: return (DAC_FS_48KHZ); case 24000: return (DAC_FS_24KHZ); case 12000: return (DAC_FS_12KHZ); case 192000: return (DAC_FS_192KHZ); case 32000: return (DAC_FS_32KHZ); case 16000: 
return (DAC_FS_16KHZ); case 8000: return (DAC_FS_8KHZ); case 96000: return (DAC_FS_96KHZ); default: return (DAC_FS_48KHZ); } } static void a10codec_start(struct a10codec_chinfo *ch) { struct a10codec_info *sc = ch->parent; uint32_t val; ch->pos = 0; if (ch->dir == PCMDIR_PLAY) { /* Flush DAC FIFO */ CODEC_WRITE(sc, AC_DAC_FIFOC(sc), DAC_FIFOC_FIFO_FLUSH); /* Clear DAC FIFO status */ CODEC_WRITE(sc, AC_DAC_FIFOS(sc), CODEC_READ(sc, AC_DAC_FIFOS(sc))); /* Unmute output */ sc->cfg->mute(sc, 0, ch->dir); /* Configure DAC DMA channel */ a10codec_dmaconfig(ch); /* Configure DAC FIFO */ CODEC_WRITE(sc, AC_DAC_FIFOC(sc), (AFMT_CHANNEL(ch->format) == 1 ? DAC_FIFOC_MONO_EN : 0) | (a10codec_fs(ch) << DAC_FIFOC_FS_SHIFT) | (FIFO_MODE_16_15_0 << DAC_FIFOC_FIFO_MODE_SHIFT) | (DRQ_CLR_CNT << DAC_FIFOC_DRQ_CLR_CNT_SHIFT) | (TX_TRIG_LEVEL << DAC_FIFOC_TX_TRIG_LEVEL_SHIFT)); /* Enable DAC DRQ */ val = CODEC_READ(sc, AC_DAC_FIFOC(sc)); val |= DAC_FIFOC_DRQ_EN; CODEC_WRITE(sc, AC_DAC_FIFOC(sc), val); } else { /* Flush ADC FIFO */ CODEC_WRITE(sc, AC_ADC_FIFOC(sc), ADC_FIFOC_FIFO_FLUSH); /* Clear ADC FIFO status */ CODEC_WRITE(sc, AC_ADC_FIFOS(sc), CODEC_READ(sc, AC_ADC_FIFOS(sc))); /* Unmute input */ sc->cfg->mute(sc, 0, ch->dir); /* Configure ADC DMA channel */ a10codec_dmaconfig(ch); /* Configure ADC FIFO */ CODEC_WRITE(sc, AC_ADC_FIFOC(sc), ADC_FIFOC_EN_AD | ADC_FIFOC_RX_FIFO_MODE | (AFMT_CHANNEL(ch->format) == 1 ? 
ADC_FIFOC_MONO_EN : 0) | (a10codec_fs(ch) << ADC_FIFOC_FS_SHIFT) | (RX_TRIG_LEVEL << ADC_FIFOC_RX_TRIG_LEVEL_SHIFT)); /* Enable ADC DRQ */ val = CODEC_READ(sc, AC_ADC_FIFOC(sc)); val |= ADC_FIFOC_DRQ_EN; CODEC_WRITE(sc, AC_ADC_FIFOC(sc), val); } /* Start DMA transfer */ a10codec_transfer(ch); } static void a10codec_stop(struct a10codec_chinfo *ch) { struct a10codec_info *sc = ch->parent; /* Disable DMA channel */ SUNXI_DMA_HALT(ch->dmac, ch->dmachan); sc->cfg->mute(sc, 1, ch->dir); if (ch->dir == PCMDIR_PLAY) { /* Disable DAC DRQ */ CODEC_WRITE(sc, AC_DAC_FIFOC(sc), 0); } else { /* Disable ADC DRQ */ CODEC_WRITE(sc, AC_ADC_FIFOC(sc), 0); } } static void * a10codec_chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct a10codec_info *sc = devinfo; struct a10codec_chinfo *ch = dir == PCMDIR_PLAY ? &sc->play : &sc->rec; phandle_t xref; pcell_t *cells; int ncells, error; error = ofw_bus_parse_xref_list_alloc(ofw_bus_get_node(sc->dev), "dmas", "#dma-cells", dir == PCMDIR_PLAY ? 1 : 0, &xref, &ncells, &cells); if (error != 0) { device_printf(sc->dev, "cannot parse 'dmas' property\n"); return (NULL); } OF_prop_free(cells); ch->parent = sc; ch->channel = c; ch->buffer = b; ch->dir = dir; ch->fifo = rman_get_start(sc->res[0]) + (dir == PCMDIR_REC ? 
AC_ADC_RXDATA(sc) : AC_DAC_TXDATA(sc)); ch->dmac = OF_device_from_xref(xref); if (ch->dmac == NULL) { device_printf(sc->dev, "cannot find DMA controller\n"); device_printf(sc->dev, "xref = 0x%x\n", (u_int)xref); return (NULL); } ch->dmachan = SUNXI_DMA_ALLOC(ch->dmac, false, a10codec_dmaintr, ch); if (ch->dmachan == NULL) { device_printf(sc->dev, "cannot allocate DMA channel\n"); return (NULL); } error = bus_dmamem_alloc(sc->dmat, &ch->dmaaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &ch->dmamap); if (error != 0) { device_printf(sc->dev, "cannot allocate channel buffer\n"); return (NULL); } error = bus_dmamap_load(sc->dmat, ch->dmamap, ch->dmaaddr, sc->dmasize, a10codec_dmamap_cb, ch, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->dev, "cannot load DMA map\n"); return (NULL); } memset(ch->dmaaddr, 0, sc->dmasize); if (sndbuf_setup(ch->buffer, ch->dmaaddr, sc->dmasize) != 0) { device_printf(sc->dev, "cannot setup sndbuf\n"); return (NULL); } return (ch); } static int a10codec_chan_free(kobj_t obj, void *data) { struct a10codec_chinfo *ch = data; struct a10codec_info *sc = ch->parent; SUNXI_DMA_FREE(ch->dmac, ch->dmachan); bus_dmamap_unload(sc->dmat, ch->dmamap); bus_dmamem_free(sc->dmat, ch->dmaaddr, ch->dmamap); return (0); } static int a10codec_chan_setformat(kobj_t obj, void *data, uint32_t format) { struct a10codec_chinfo *ch = data; ch->format = format; return (0); } static uint32_t a10codec_chan_setspeed(kobj_t obj, void *data, uint32_t speed) { struct a10codec_chinfo *ch = data; /* * The codec supports full duplex operation but both DAC and ADC * use the same source clock (PLL2). Limit the available speeds to * those supported by a 24576000 Hz input. 
*/ switch (speed) { case 8000: case 12000: case 16000: case 24000: case 32000: case 48000: ch->speed = speed; break; case 96000: case 192000: /* 96 KHz / 192 KHz mode only supported for playback */ if (ch->dir == PCMDIR_PLAY) { ch->speed = speed; } else { ch->speed = 48000; } break; case 44100: ch->speed = 48000; break; case 22050: ch->speed = 24000; break; case 11025: ch->speed = 12000; break; default: ch->speed = 48000; break; } return (ch->speed); } static uint32_t a10codec_chan_setblocksize(kobj_t obj, void *data, uint32_t blocksize) { struct a10codec_chinfo *ch = data; ch->blocksize = blocksize & ~3; return (ch->blocksize); } static int a10codec_chan_trigger(kobj_t obj, void *data, int go) { struct a10codec_chinfo *ch = data; struct a10codec_info *sc = ch->parent; if (!PCMTRIG_COMMON(go)) return (0); snd_mtxlock(sc->lock); switch (go) { case PCMTRIG_START: ch->run = 1; a10codec_stop(ch); a10codec_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: ch->run = 0; a10codec_stop(ch); break; default: break; } snd_mtxunlock(sc->lock); return (0); } static uint32_t a10codec_chan_getptr(kobj_t obj, void *data) { struct a10codec_chinfo *ch = data; return (ch->pos); } static struct pcmchan_caps * a10codec_chan_getcaps(kobj_t obj, void *data) { struct a10codec_chinfo *ch = data; if (ch->dir == PCMDIR_PLAY) { return (&a10codec_pcaps); } else { return (&a10codec_rcaps); } } static kobj_method_t a10codec_chan_methods[] = { KOBJMETHOD(channel_init, a10codec_chan_init), KOBJMETHOD(channel_free, a10codec_chan_free), KOBJMETHOD(channel_setformat, a10codec_chan_setformat), KOBJMETHOD(channel_setspeed, a10codec_chan_setspeed), KOBJMETHOD(channel_setblocksize, a10codec_chan_setblocksize), KOBJMETHOD(channel_trigger, a10codec_chan_trigger), KOBJMETHOD(channel_getptr, a10codec_chan_getptr), KOBJMETHOD(channel_getcaps, a10codec_chan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(a10codec_chan); /* * Device interface */ static const struct a10codec_config a10_config = { .mixer_class 
 = &a10_mixer_class,
	.mute = a10_mute,
	.drqtype_codec = 19,
	.drqtype_sdram = 22,
	/* A10/A20 register layout */
	.DPC = 0x00,
	.DAC_FIFOC = 0x04,
	.DAC_FIFOS = 0x08,
	.DAC_TXDATA = 0x0c,
	.ADC_FIFOC = 0x1c,
	.ADC_FIFOS = 0x20,
	.ADC_RXDATA = 0x24,
	.DAC_CNT = 0x30,
	.ADC_CNT = 0x34,
};

static const struct a10codec_config h3_config = {
	.mixer_class = &h3_mixer_class,
	.mute = h3_mute,
	.drqtype_codec = 15,
	.drqtype_sdram = 1,
	/* H3 register layout differs from the A10 one above */
	.DPC = 0x00,
	.DAC_FIFOC = 0x04,
	.DAC_FIFOS = 0x08,
	.DAC_TXDATA = 0x20,
	.ADC_FIFOC = 0x10,
	.ADC_FIFOS = 0x14,
	.ADC_RXDATA = 0x18,
	.DAC_CNT = 0x40,
	.ADC_CNT = 0x44,
};

static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-codec",	(uintptr_t)&a10_config },
	{ "allwinner,sun7i-a20-codec",	(uintptr_t)&a10_config },
	{ "allwinner,sun8i-h3-codec",	(uintptr_t)&h3_config },
	{ NULL, 0 }
};

/* Match against the FDT compatible strings above. */
static int
a10codec_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);
	device_set_desc(dev, "Allwinner Audio Codec");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: allocate softc and bus resources, create the DMA tag for the
 * audio buffers, bring up clocks/reset, enable the DAC, initialize the
 * mixer, unmute the power amplifier and register one play + one record
 * pcm channel.
 */
static int
a10codec_attach(device_t dev)
{
	struct a10codec_info *sc;
	char status[SND_STATUSLEN];
	struct gpiobus_pin *pa_pin;
	phandle_t node;
	clk_t clk_bus, clk_codec;
	hwreset_t rst;
	uint32_t val;
	int error;

	node = ofw_bus_get_node(dev);

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->cfg = (void *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	sc->dev = dev;
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "a10codec softc");

	if (bus_alloc_resources(dev, a10codec_spec, sc->res)) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	sc->dmasize = 131072;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    4, sc->dmasize,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->dmasize, 1,		/* maxsize, nsegs */
	    sc->dmasize, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0) {
		device_printf(dev, "cannot create DMA tag\n");
		goto fail;
	}

	/* Get clocks; newer DTs name the bus clock "apb", older ones "ahb". */
	if (clk_get_by_ofw_name(dev, 0, "apb", &clk_bus) != 0 &&
	    clk_get_by_ofw_name(dev, 0, "ahb", &clk_bus) != 0) {
		device_printf(dev, "cannot find bus clock\n");
		goto fail;
	}
	if (clk_get_by_ofw_name(dev, 0, "codec", &clk_codec) != 0) {
		device_printf(dev, "cannot find codec clock\n");
		goto fail;
	}

	/* Gating bus clock for codec */
	if (clk_enable(clk_bus) != 0) {
		device_printf(dev, "cannot enable bus clock\n");
		goto fail;
	}

	/* NOTE(review): clk_bus/clk_codec are not released on the fail
	 * path below — confirm whether clk_release is expected here. */

	/* Activate audio codec clock. According to the A10 and A20 user
	 * manuals, Audio_pll can be either 24.576MHz or 22.5792MHz. Most
	 * audio sampling rates require an 24.576MHz input clock with the
	 * exception of 44.1kHz, 22.05kHz, and 11.025kHz. Unfortunately,
	 * both capture and playback use the same clock source so to
	 * safely support independent full duplex operation, we use a fixed
	 * 24.576MHz clock source and don't advertise native support for
	 * the three sampling rates that require a 22.5792MHz input.
	 */
	error = clk_set_freq(clk_codec, 24576000, CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot set codec clock frequency\n");
		goto fail;
	}

	/* Enable audio codec clock */
	error = clk_enable(clk_codec);
	if (error != 0) {
		device_printf(dev, "cannot enable codec clock\n");
		goto fail;
	}

	/* De-assert hwreset (optional; not all SoCs have one) */
	if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) {
		error = hwreset_deassert(rst);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Enable DAC */
	val = CODEC_READ(sc, AC_DAC_DPC(sc));
	val |= DAC_DPC_EN_DA;
	CODEC_WRITE(sc, AC_DAC_DPC(sc), val);

	if (mixer_init(dev, sc->cfg->mixer_class, sc)) {
		device_printf(dev, "mixer_init failed\n");
		goto fail;
	}

	/* Unmute PA (optional GPIO) */
	if (gpio_pin_get_by_ofw_property(dev, node, "allwinner,pa-gpios",
	    &pa_pin) == 0) {
		error = gpio_pin_set_active(pa_pin, 1);
		if (error != 0)
			device_printf(dev, "failed to unmute PA\n");
	}

	pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);

	if (pcm_register(dev, sc, 1, 1)) {
		device_printf(dev, "pcm_register failed\n");
		goto fail;
	}

	pcm_addchan(dev, PCMDIR_PLAY, &a10codec_chan_class, sc);
	pcm_addchan(dev, PCMDIR_REC, &a10codec_chan_class, sc);

	snprintf(status, SND_STATUSLEN, "at %s", ofw_bus_get_name(dev));
	pcm_setstatus(dev, status);

	return (0);

fail:
	bus_release_resources(dev, a10codec_spec, sc->res);
	snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);

	return (ENXIO);
}

static device_method_t a10codec_pcm_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a10codec_probe),
	DEVMETHOD(device_attach,	a10codec_attach),

	DEVMETHOD_END
};

static driver_t a10codec_pcm_driver = {
	"pcm",
	a10codec_pcm_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(a10codec, simplebus, a10codec_pcm_driver, 0, 0);
MODULE_DEPEND(a10codec, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_VERSION(a10codec, 1);
diff --git a/sys/arm/allwinner/a10_dmac.c b/sys/arm/allwinner/a10_dmac.c
index d5d27748a9d9..e711c5268b2d 100644
--- a/sys/arm/allwinner/a10_dmac.c
+++
b/sys/arm/allwinner/a10_dmac.c @@ -1,468 +1,468 @@ /*- * Copyright (c) 2014-2016 Jared D. McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ /* * Allwinner A10/A20 DMA controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include "sunxi_dma_if.h" #define NDMA_CHANNELS 8 #define DDMA_CHANNELS 8 enum a10dmac_type { CH_NDMA, CH_DDMA }; struct a10dmac_softc; struct a10dmac_channel { struct a10dmac_softc * ch_sc; uint8_t ch_index; enum a10dmac_type ch_type; void (*ch_callback)(void *); void * ch_callbackarg; uint32_t ch_regoff; }; struct a10dmac_softc { struct resource * sc_res[2]; struct mtx sc_mtx; void * sc_ih; struct a10dmac_channel sc_ndma_channels[NDMA_CHANNELS]; struct a10dmac_channel sc_ddma_channels[DDMA_CHANNELS]; }; static struct resource_spec a10dmac_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define DMA_READ(sc, reg) bus_read_4((sc)->sc_res[0], (reg)) #define DMA_WRITE(sc, reg, val) bus_write_4((sc)->sc_res[0], (reg), (val)) #define DMACH_READ(ch, reg) \ DMA_READ((ch)->ch_sc, (reg) + (ch)->ch_regoff) #define DMACH_WRITE(ch, reg, val) \ DMA_WRITE((ch)->ch_sc, (reg) + (ch)->ch_regoff, (val)) static void a10dmac_intr(void *); static int a10dmac_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-dma")) return (ENXIO); device_set_desc(dev, "Allwinner DMA controller"); return (BUS_PROBE_DEFAULT); } static int a10dmac_attach(device_t dev) { struct a10dmac_softc *sc; unsigned int index; clk_t clk; int error; sc = device_get_softc(dev); if (bus_alloc_resources(dev, a10dmac_spec, sc->sc_res)) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "a10 dmac", NULL, MTX_SPIN); /* Activate DMA controller clock */ error = clk_get_by_ofw_index(dev, 0, 0, &clk); if (error != 0) { device_printf(dev, "cannot get clock\n"); return (error); } error = clk_enable(clk); if (error != 0) { device_printf(dev, "cannot enable clock\n"); return (error); } /* 
Disable all interrupts and clear pending status */
	DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, 0);
	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, ~0);

	/* Initialize channels: idle state, no callback, CTL cleared. */
	for (index = 0; index < NDMA_CHANNELS; index++) {
		sc->sc_ndma_channels[index].ch_sc = sc;
		sc->sc_ndma_channels[index].ch_index = index;
		sc->sc_ndma_channels[index].ch_type = CH_NDMA;
		sc->sc_ndma_channels[index].ch_callback = NULL;
		sc->sc_ndma_channels[index].ch_callbackarg = NULL;
		sc->sc_ndma_channels[index].ch_regoff = AWIN_NDMA_REG(index);
		DMACH_WRITE(&sc->sc_ndma_channels[index], AWIN_NDMA_CTL_REG, 0);
	}
	for (index = 0; index < DDMA_CHANNELS; index++) {
		sc->sc_ddma_channels[index].ch_sc = sc;
		sc->sc_ddma_channels[index].ch_index = index;
		sc->sc_ddma_channels[index].ch_type = CH_DDMA;
		sc->sc_ddma_channels[index].ch_callback = NULL;
		sc->sc_ddma_channels[index].ch_callbackarg = NULL;
		sc->sc_ddma_channels[index].ch_regoff = AWIN_DDMA_REG(index);
		DMACH_WRITE(&sc->sc_ddma_channels[index], AWIN_DDMA_CTL_REG, 0);
	}

	error = bus_setup_intr(dev, sc->sc_res[1], INTR_MPSAFE | INTR_TYPE_MISC,
	    NULL, a10dmac_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler\n");
		bus_release_resources(dev, a10dmac_spec, sc->sc_res);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	/* Register so clients can find us via OF xref (a10_codec does). */
	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);

	return (0);
}

/*
 * Interrupt handler: ack all pending status, then dispatch the
 * end-of-transfer callbacks for every channel that completed.
 */
static void
a10dmac_intr(void *priv)
{
	struct a10dmac_softc *sc = priv;
	uint32_t sta, bit, mask;
	uint8_t index;

	sta = DMA_READ(sc, AWIN_DMA_IRQ_PEND_STA_REG);
	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, sta);

	while ((bit = ffs(sta & AWIN_DMA_IRQ_END_MASK)) != 0) {
		mask = (1U << (bit - 1));
		sta &= ~mask;
		/*
		 * Map status bit to channel number. The status register is
		 * encoded with two bits of status per channel (lowest bit
		 * is half transfer pending, highest bit is end transfer
		 * pending). The 8 normal DMA channel status are in the lower
		 * 16 bits and the 8 dedicated DMA channel status are in
		 * the upper 16 bits. The output is a channel number
		 * from 0-7.
		 */
		index = ((bit - 1) / 2) & 7;
		if (mask & AWIN_DMA_IRQ_NDMA) {
			if (sc->sc_ndma_channels[index].ch_callback == NULL)
				continue;
			sc->sc_ndma_channels[index].ch_callback(
			    sc->sc_ndma_channels[index].ch_callbackarg);
		} else {
			if (sc->sc_ddma_channels[index].ch_callback == NULL)
				continue;
			sc->sc_ddma_channels[index].ch_callback(
			    sc->sc_ddma_channels[index].ch_callbackarg);
		}
	}
}

/* NDMA and DDMA channels keep their CTL register at different offsets;
 * these two helpers hide that difference. */
static uint32_t
a10dmac_read_ctl(struct a10dmac_channel *ch)
{
	if (ch->ch_type == CH_NDMA) {
		return (DMACH_READ(ch, AWIN_NDMA_CTL_REG));
	} else {
		return (DMACH_READ(ch, AWIN_DDMA_CTL_REG));
	}
}

static void
a10dmac_write_ctl(struct a10dmac_channel *ch, uint32_t val)
{
	if (ch->ch_type == CH_NDMA) {
		DMACH_WRITE(ch, AWIN_NDMA_CTL_REG, val);
	} else {
		DMACH_WRITE(ch, AWIN_DDMA_CTL_REG, val);
	}
}

/*
 * SUNXI_DMA_SET_CONFIG: translate the generic sunxi_dma_config into the
 * controller's CTL (and, for dedicated channels, PARA) register format.
 * Returns EINVAL for widths/burst lengths the hardware cannot do.
 */
static int
a10dmac_set_config(device_t dev, void *priv,
    const struct sunxi_dma_config *cfg)
{
	struct a10dmac_channel *ch = priv;
	uint32_t val;
	unsigned int dst_dw, dst_bl, dst_bs, dst_wc, dst_am;
	unsigned int src_dw, src_bl, src_bs, src_wc, src_am;

	switch (cfg->dst_width) {
	case 8:
		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_8;
		break;
	case 16:
		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_16;
		break;
	case 32:
		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_32;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->dst_burst_len) {
	case 1:
		dst_bl = AWIN_DMA_CTL_BURST_LEN_1;
		break;
	case 4:
		dst_bl = AWIN_DMA_CTL_BURST_LEN_4;
		break;
	case 8:
		dst_bl = AWIN_DMA_CTL_BURST_LEN_8;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_width) {
	case 8:
		src_dw = AWIN_DMA_CTL_DATA_WIDTH_8;
		break;
	case 16:
		src_dw = AWIN_DMA_CTL_DATA_WIDTH_16;
		break;
	case 32:
		src_dw = AWIN_DMA_CTL_DATA_WIDTH_32;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_burst_len) {
	case 1:
		src_bl = AWIN_DMA_CTL_BURST_LEN_1;
		break;
	case 4:
		src_bl = AWIN_DMA_CTL_BURST_LEN_4;
		break;
	case 8:
		src_bl = AWIN_DMA_CTL_BURST_LEN_8;
		break;
	default:
		return (EINVAL);
	}

	val = (dst_dw << AWIN_DMA_CTL_DST_DATA_WIDTH_SHIFT) |
	    (dst_bl << AWIN_DMA_CTL_DST_BURST_LEN_SHIFT) |
	    (cfg->dst_drqtype << AWIN_DMA_CTL_DST_DRQ_TYPE_SHIFT) |
	    (src_dw << AWIN_DMA_CTL_SRC_DATA_WIDTH_SHIFT) |
	    (src_bl << AWIN_DMA_CTL_SRC_BURST_LEN_SHIFT) |
	    (cfg->src_drqtype << AWIN_DMA_CTL_SRC_DRQ_TYPE_SHIFT);

	if (ch->ch_type == CH_NDMA) {
		if (cfg->dst_noincr)
			val |= AWIN_NDMA_CTL_DST_ADDR_NOINCR;
		if (cfg->src_noincr)
			val |= AWIN_NDMA_CTL_SRC_ADDR_NOINCR;

		DMACH_WRITE(ch, AWIN_NDMA_CTL_REG, val);
	} else {
		/* DDMA expresses no-increment as "IO" address mode. */
		dst_am = cfg->dst_noincr ? AWIN_DDMA_CTL_DMA_ADDR_IO :
		    AWIN_DDMA_CTL_DMA_ADDR_LINEAR;
		src_am = cfg->src_noincr ? AWIN_DDMA_CTL_DMA_ADDR_IO :
		    AWIN_DDMA_CTL_DMA_ADDR_LINEAR;

		val |= (dst_am << AWIN_DDMA_CTL_DST_ADDR_MODE_SHIFT);
		val |= (src_am << AWIN_DDMA_CTL_SRC_ADDR_MODE_SHIFT);

		DMACH_WRITE(ch, AWIN_DDMA_CTL_REG, val);

		/* PARA register fields are stored as value-minus-one. */
		dst_bs = cfg->dst_blksize - 1;
		dst_wc = cfg->dst_wait_cyc - 1;
		src_bs = cfg->src_blksize - 1;
		src_wc = cfg->src_wait_cyc - 1;

		DMACH_WRITE(ch, AWIN_DDMA_PARA_REG,
		    (dst_bs << AWIN_DDMA_PARA_DST_DATA_BLK_SIZ_SHIFT) |
		    (dst_wc << AWIN_DDMA_PARA_DST_WAIT_CYC_SHIFT) |
		    (src_bs << AWIN_DDMA_PARA_SRC_DATA_BLK_SIZ_SHIFT) |
		    (src_wc << AWIN_DDMA_PARA_SRC_WAIT_CYC_SHIFT));
	}

	return (0);
}

/*
 * SUNXI_DMA_ALLOC: claim the first free channel of the requested kind
 * (dedicated or normal) and enable its end-of-transfer interrupt.
 * A channel is "free" iff its callback pointer is NULL.
 * Returns NULL when all channels of that kind are busy.
 */
static void *
a10dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg)
{
	struct a10dmac_softc *sc = device_get_softc(dev);
	struct a10dmac_channel *ch_list;
	struct a10dmac_channel *ch = NULL;
	uint32_t irqen;
	uint8_t ch_count, index;

	if (dedicated) {
		ch_list = sc->sc_ddma_channels;
		ch_count = DDMA_CHANNELS;
	} else {
		ch_list = sc->sc_ndma_channels;
		ch_count = NDMA_CHANNELS;
	}

	mtx_lock_spin(&sc->sc_mtx);
	for (index = 0; index < ch_count; index++) {
		if (ch_list[index].ch_callback == NULL) {
			ch = &ch_list[index];
			ch->ch_callback = cb;
			ch->ch_callbackarg = cbarg;

			irqen = DMA_READ(sc, AWIN_DMA_IRQ_EN_REG);
			if (ch->ch_type == CH_NDMA)
				irqen |= AWIN_DMA_IRQ_NDMA_END(index);
			else
				irqen |= AWIN_DMA_IRQ_DDMA_END(index);
			DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, irqen);

			break;
		}
	}
	mtx_unlock_spin(&sc->sc_mtx);

	return (ch);
}

/*
 * SUNXI_DMA_FREE: stop the channel, disable and ack its interrupt, and
 * mark it free again (callback = NULL).
 */
static void
a10dmac_free(device_t dev, void *priv)
{
	struct a10dmac_channel *ch = priv;
	struct a10dmac_softc *sc = ch->ch_sc;
	uint32_t irqen, sta, cfg;

	mtx_lock_spin(&sc->sc_mtx);

	irqen = DMA_READ(sc, AWIN_DMA_IRQ_EN_REG);
	cfg = a10dmac_read_ctl(ch);
	if (ch->ch_type == CH_NDMA) {
		sta = AWIN_DMA_IRQ_NDMA_END(ch->ch_index);
		cfg &= ~AWIN_NDMA_CTL_DMA_LOADING;
	} else {
		sta = AWIN_DMA_IRQ_DDMA_END(ch->ch_index);
		cfg &= ~AWIN_DDMA_CTL_DMA_LOADING;
	}
	irqen &= ~sta;

	a10dmac_write_ctl(ch, cfg);
	DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, irqen);
	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, sta);

	ch->ch_callback = NULL;
	ch->ch_callbackarg = NULL;

	mtx_unlock_spin(&sc->sc_mtx);
}

/*
 * SUNXI_DMA_TRANSFER: program a single src->dst transfer of nbytes and
 * start it by setting the LOADING bit.  Returns EBUSY if the channel is
 * still running a previous transfer.
 */
static int
a10dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst,
    size_t nbytes)
{
	struct a10dmac_channel *ch = priv;
	uint32_t cfg;

	cfg = a10dmac_read_ctl(ch);
	if (ch->ch_type == CH_NDMA) {
		if (cfg & AWIN_NDMA_CTL_DMA_LOADING)
			return (EBUSY);

		DMACH_WRITE(ch, AWIN_NDMA_SRC_ADDR_REG, src);
		DMACH_WRITE(ch, AWIN_NDMA_DEST_ADDR_REG, dst);
		DMACH_WRITE(ch, AWIN_NDMA_BC_REG, nbytes);

		cfg |= AWIN_NDMA_CTL_DMA_LOADING;
		a10dmac_write_ctl(ch, cfg);
	} else {
		if (cfg & AWIN_DDMA_CTL_DMA_LOADING)
			return (EBUSY);

		DMACH_WRITE(ch, AWIN_DDMA_SRC_START_ADDR_REG, src);
		DMACH_WRITE(ch, AWIN_DDMA_DEST_START_ADDR_REG, dst);
		DMACH_WRITE(ch, AWIN_DDMA_BC_REG, nbytes);

		cfg |= AWIN_DDMA_CTL_DMA_LOADING;
		a10dmac_write_ctl(ch, cfg);
	}

	return (0);
}

/* SUNXI_DMA_HALT: clear the LOADING bit to stop the channel. */
static void
a10dmac_halt(device_t dev, void *priv)
{
	struct a10dmac_channel *ch = priv;
	uint32_t cfg;

	cfg = a10dmac_read_ctl(ch);
	if (ch->ch_type == CH_NDMA) {
		cfg &= ~AWIN_NDMA_CTL_DMA_LOADING;
	} else {
		cfg &= ~AWIN_DDMA_CTL_DMA_LOADING;
	}
	a10dmac_write_ctl(ch, cfg);
}

static device_method_t a10dmac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a10dmac_probe),
	DEVMETHOD(device_attach,	a10dmac_attach),

	/* sunxi DMA interface */
	DEVMETHOD(sunxi_dma_alloc,	a10dmac_alloc),
	DEVMETHOD(sunxi_dma_free,	a10dmac_free),
	DEVMETHOD(sunxi_dma_set_config,	a10dmac_set_config),
	DEVMETHOD(sunxi_dma_transfer,	a10dmac_transfer),
DEVMETHOD(sunxi_dma_halt, a10dmac_halt), DEVMETHOD_END }; static driver_t a10dmac_driver = { "a10dmac", a10dmac_methods, sizeof(struct a10dmac_softc) }; DRIVER_MODULE(a10dmac, simplebus, a10dmac_driver, 0, 0); diff --git a/sys/arm/allwinner/a10_timer.c b/sys/arm/allwinner/a10_timer.c index 890cb2332396..18fb2a56da3a 100644 --- a/sys/arm/allwinner/a10_timer.c +++ b/sys/arm/allwinner/a10_timer.c @@ -1,480 +1,480 @@ /*- * Copyright (c) 2012 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #if defined(__aarch64__) #include "opt_soc.h" #else #include #endif /** * Timer registers addr * */ #define TIMER_IRQ_EN_REG 0x00 #define TIMER_IRQ_ENABLE(x) (1 << x) #define TIMER_IRQ_STA_REG 0x04 #define TIMER_IRQ_PENDING(x) (1 << x) /* * On A10, A13, A20 and A31/A31s 6 timers are available */ #define TIMER_CTRL_REG(x) (0x10 + 0x10 * x) #define TIMER_CTRL_START (1 << 0) #define TIMER_CTRL_AUTORELOAD (1 << 1) #define TIMER_CTRL_CLKSRC_MASK (3 << 2) #define TIMER_CTRL_OSC24M (1 << 2) #define TIMER_CTRL_PRESCALAR_MASK (0x7 << 4) #define TIMER_CTRL_PRESCALAR(x) ((x - 1) << 4) #define TIMER_CTRL_MODE_MASK (1 << 7) #define TIMER_CTRL_MODE_SINGLE (1 << 7) #define TIMER_CTRL_MODE_CONTINUOUS (0 << 7) #define TIMER_INTV_REG(x) (0x14 + 0x10 * x) #define TIMER_CURV_REG(x) (0x18 + 0x10 * x) /* 64 bit counter, available in A10 and A13 */ #define CNT64_CTRL_REG 0xa0 #define CNT64_CTRL_RL_EN 0x02 /* read latch enable */ #define CNT64_LO_REG 0xa4 #define CNT64_HI_REG 0xa8 #define SYS_TIMER_CLKSRC 24000000 /* clock source */ enum a10_timer_type { A10_TIMER = 1, A23_TIMER, }; struct a10_timer_softc { device_t sc_dev; struct resource *res[2]; void *sc_ih; /* interrupt handler */ uint32_t sc_period; uint64_t timer0_freq; struct eventtimer et; enum a10_timer_type type; }; #define timer_read_4(sc, reg) \ bus_read_4(sc->res[A10_TIMER_MEMRES], reg) #define timer_write_4(sc, reg, val) \ bus_write_4(sc->res[A10_TIMER_MEMRES], reg, val) static u_int a10_timer_get_timecount(struct timecounter *); #if defined(__arm__) static int a10_timer_timer_start(struct eventtimer *, sbintime_t first, sbintime_t period); static int a10_timer_timer_stop(struct eventtimer *); #endif static uint64_t timer_read_counter64(struct a10_timer_softc *sc); #if defined(__arm__) static void a10_timer_eventtimer_setup(struct a10_timer_softc *sc); #endif 
#if defined(__aarch64__) static void a23_timer_timecounter_setup(struct a10_timer_softc *sc); static u_int a23_timer_get_timecount(struct timecounter *tc); #endif static int a10_timer_irq(void *); static int a10_timer_probe(device_t); static int a10_timer_attach(device_t); #if defined(__arm__) static delay_func a10_timer_delay; #endif static struct timecounter a10_timer_timecounter = { .tc_name = "a10_timer timer0", .tc_get_timecount = a10_timer_get_timecount, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 1000, }; #if defined(__aarch64__) static struct timecounter a23_timer_timecounter = { .tc_name = "a10_timer timer0", .tc_get_timecount = a23_timer_get_timecount, .tc_counter_mask = ~0u, .tc_frequency = 0, /* We want it to be selected over the arm generic timecounter */ .tc_quality = 2000, }; #endif #define A10_TIMER_MEMRES 0 #define A10_TIMER_IRQRES 1 static struct resource_spec a10_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static struct ofw_compat_data compat_data[] = { {"allwinner,sun4i-a10-timer", A10_TIMER}, #if defined(__aarch64__) {"allwinner,sun8i-a23-timer", A23_TIMER}, #endif {NULL, 0}, }; static int a10_timer_probe(device_t dev) { #if defined(__arm__) u_int soc_family; #endif if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); #if defined(__arm__) /* For SoC >= A10 we have the ARM Timecounter/Eventtimer */ soc_family = allwinner_soc_family(); if (soc_family != ALLWINNERSOC_SUN4I && soc_family != ALLWINNERSOC_SUN5I) return (ENXIO); #endif device_set_desc(dev, "Allwinner timer"); return (BUS_PROBE_DEFAULT); } static int a10_timer_attach(device_t dev) { struct a10_timer_softc *sc; clk_t clk; int err; sc = device_get_softc(dev); sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (bus_alloc_resources(dev, a10_timer_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->sc_dev = dev; /* Setup and enable the timer 
interrupt */ err = bus_setup_intr(dev, sc->res[A10_TIMER_IRQRES], INTR_TYPE_CLK, a10_timer_irq, NULL, sc, &sc->sc_ih); if (err != 0) { bus_release_resources(dev, a10_timer_spec, sc->res); device_printf(dev, "Unable to setup the clock irq handler, " "err = %d\n", err); return (ENXIO); } if (clk_get_by_ofw_index(dev, 0, 0, &clk) != 0) sc->timer0_freq = SYS_TIMER_CLKSRC; else { if (clk_get_freq(clk, &sc->timer0_freq) != 0) { device_printf(dev, "Cannot get clock source frequency\n"); return (ENXIO); } } #if defined(__arm__) a10_timer_eventtimer_setup(sc); arm_set_delay(a10_timer_delay, sc); a10_timer_timecounter.tc_priv = sc; a10_timer_timecounter.tc_frequency = sc->timer0_freq; tc_init(&a10_timer_timecounter); #elif defined(__aarch64__) a23_timer_timecounter_setup(sc); #endif if (bootverbose) { device_printf(sc->sc_dev, "clock: hz=%d stathz = %d\n", hz, stathz); device_printf(sc->sc_dev, "event timer clock frequency %ju\n", sc->timer0_freq); device_printf(sc->sc_dev, "timecounter clock frequency %jd\n", a10_timer_timecounter.tc_frequency); } return (0); } static int a10_timer_irq(void *arg) { struct a10_timer_softc *sc; uint32_t val; sc = (struct a10_timer_softc *)arg; /* Clear interrupt pending bit. */ timer_write_4(sc, TIMER_IRQ_STA_REG, TIMER_IRQ_PENDING(0)); val = timer_read_4(sc, TIMER_CTRL_REG(0)); /* * Disabled autoreload and sc_period > 0 means * timer_start was called with non NULL first value. * Now we will set periodic timer with the given period * value. 
*/ if ((val & (1<<1)) == 0 && sc->sc_period > 0) { /* Update timer */ timer_write_4(sc, TIMER_CURV_REG(0), sc->sc_period); /* Make periodic and enable */ val |= TIMER_CTRL_AUTORELOAD | TIMER_CTRL_START; timer_write_4(sc, TIMER_CTRL_REG(0), val); } if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } /* * Event timer function for A10 and A13 */ #if defined(__arm__) static void a10_timer_eventtimer_setup(struct a10_timer_softc *sc) { uint32_t val; /* Set clock source to OSC24M, 1 pre-division, continuous mode */ val = timer_read_4(sc, TIMER_CTRL_REG(0)); val &= ~TIMER_CTRL_PRESCALAR_MASK | ~TIMER_CTRL_MODE_MASK | ~TIMER_CTRL_CLKSRC_MASK; val |= TIMER_CTRL_PRESCALAR(1) | TIMER_CTRL_OSC24M; timer_write_4(sc, TIMER_CTRL_REG(0), val); /* Enable timer0 */ val = timer_read_4(sc, TIMER_IRQ_EN_REG); val |= TIMER_IRQ_ENABLE(0); timer_write_4(sc, TIMER_IRQ_EN_REG, val); /* Set desired frequency in event timer and timecounter */ sc->et.et_frequency = sc->timer0_freq; sc->et.et_name = "a10_timer Eventtimer"; sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERIODIC; sc->et.et_quality = 1000; sc->et.et_min_period = (0x00000005LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = a10_timer_timer_start; sc->et.et_stop = a10_timer_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); } static int a10_timer_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct a10_timer_softc *sc; uint32_t count; uint32_t val; sc = (struct a10_timer_softc *)et->et_priv; if (period != 0) sc->sc_period = ((uint32_t)et->et_frequency * period) >> 32; else sc->sc_period = 0; if (first != 0) count = ((uint32_t)et->et_frequency * first) >> 32; else count = sc->sc_period; /* Update timer values */ timer_write_4(sc, TIMER_INTV_REG(0), sc->sc_period); timer_write_4(sc, TIMER_CURV_REG(0), count); val = timer_read_4(sc, TIMER_CTRL_REG(0)); if (period != 0) { /* periodic */ val |= 
TIMER_CTRL_AUTORELOAD; } else { /* oneshot */ val &= ~TIMER_CTRL_AUTORELOAD; } /* Enable timer0 */ val |= TIMER_IRQ_ENABLE(0); timer_write_4(sc, TIMER_CTRL_REG(0), val); return (0); } static int a10_timer_timer_stop(struct eventtimer *et) { struct a10_timer_softc *sc; uint32_t val; sc = (struct a10_timer_softc *)et->et_priv; /* Disable timer0 */ val = timer_read_4(sc, TIMER_CTRL_REG(0)); val &= ~TIMER_CTRL_START; timer_write_4(sc, TIMER_CTRL_REG(0), val); sc->sc_period = 0; return (0); } #endif /* * Timecounter functions for A23 and above */ #if defined(__aarch64__) static void a23_timer_timecounter_setup(struct a10_timer_softc *sc) { uint32_t val; /* Set clock source to OSC24M, 1 pre-division, continuous mode */ val = timer_read_4(sc, TIMER_CTRL_REG(0)); val &= ~TIMER_CTRL_PRESCALAR_MASK | ~TIMER_CTRL_MODE_MASK | ~TIMER_CTRL_CLKSRC_MASK; val |= TIMER_CTRL_PRESCALAR(1) | TIMER_CTRL_OSC24M; timer_write_4(sc, TIMER_CTRL_REG(0), val); /* Set reload value */ timer_write_4(sc, TIMER_INTV_REG(0), ~0); val = timer_read_4(sc, TIMER_INTV_REG(0)); /* Enable timer0 */ val = timer_read_4(sc, TIMER_CTRL_REG(0)); val |= TIMER_CTRL_AUTORELOAD | TIMER_CTRL_START; timer_write_4(sc, TIMER_CTRL_REG(0), val); val = timer_read_4(sc, TIMER_CURV_REG(0)); a23_timer_timecounter.tc_priv = sc; a23_timer_timecounter.tc_frequency = sc->timer0_freq; tc_init(&a23_timer_timecounter); } static u_int a23_timer_get_timecount(struct timecounter *tc) { struct a10_timer_softc *sc; uint32_t val; sc = (struct a10_timer_softc *)tc->tc_priv; if (sc == NULL) return (0); val = timer_read_4(sc, TIMER_CURV_REG(0)); /* Counter count backwards */ return (~0u - val); } #endif /* * Timecounter functions for A10 and A13, using the 64 bits counter */ static uint64_t timer_read_counter64(struct a10_timer_softc *sc) { uint32_t lo, hi; /* Latch counter, wait for it to be ready to read. 
*/ timer_write_4(sc, CNT64_CTRL_REG, CNT64_CTRL_RL_EN); while (timer_read_4(sc, CNT64_CTRL_REG) & CNT64_CTRL_RL_EN) continue; hi = timer_read_4(sc, CNT64_HI_REG); lo = timer_read_4(sc, CNT64_LO_REG); return (((uint64_t)hi << 32) | lo); } #if defined(__arm__) static void a10_timer_delay(int usec, void *arg) { struct a10_timer_softc *sc = arg; uint64_t end, now; now = timer_read_counter64(sc); end = now + (sc->timer0_freq / 1000000) * (usec + 1); while (now < end) now = timer_read_counter64(sc); } #endif static u_int a10_timer_get_timecount(struct timecounter *tc) { if (tc->tc_priv == NULL) return (0); return ((u_int)timer_read_counter64(tc->tc_priv)); } static device_method_t a10_timer_methods[] = { DEVMETHOD(device_probe, a10_timer_probe), DEVMETHOD(device_attach, a10_timer_attach), DEVMETHOD_END }; static driver_t a10_timer_driver = { "a10_timer", a10_timer_methods, sizeof(struct a10_timer_softc), }; EARLY_DRIVER_MODULE(a10_timer, simplebus, a10_timer_driver, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm/allwinner/a31_dmac.c b/sys/arm/allwinner/a31_dmac.c index 0a291becbb9d..988444c4b1c5 100644 --- a/sys/arm/allwinner/a31_dmac.c +++ b/sys/arm/allwinner/a31_dmac.c @@ -1,554 +1,554 @@ /*- * Copyright (c) 2016 Jared D. McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Allwinner DMA controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "sunxi_dma_if.h" #define DMA_IRQ_EN_REG0 0x00 #define DMA_IRQ_EN_REG1 0x04 #define DMA_IRQ_EN_REG(ch) (DMA_IRQ_EN_REG0 + ((ch) / 8) * 4) #define DMA_PKG_IRQ_EN(ch) (1 << (((ch) % 8) * 4 + 1)) #define DMA_PKG_IRQ_MASK 0x2222222222222222ULL #define DMA_IRQ_PEND_REG0 0x10 #define DMA_IRQ_PEND_REG1 0x14 #define DMA_IRQ_PEND_REG(ch) (DMA_IRQ_PEND_REG0 + ((ch) / 8) * 4) #define DMA_STA_REG 0x30 #define DMA_EN_REG(n) (0x100 + (n) * 0x40 + 0x00) #define DMA_EN (1 << 0) #define DMA_PAU_REG(n) (0x100 + (n) * 0x40 + 0x04) #define DMA_STAR_ADDR_REG(n) (0x100 + (n) * 0x40 + 0x08) #define DMA_CFG_REG(n) (0x100 + (n) * 0x40 + 0x0c) #define DMA_DEST_DATA_WIDTH (0x3 << 25) #define DMA_DEST_DATA_WIDTH_SHIFT 25 #define DMA_DEST_BST_LEN (0x3 << 22) #define DMA_DEST_BST_LEN_SHIFT 22 #define DMA_DEST_ADDR_MODE (0x1 << 21) #define DMA_DEST_ADDR_MODE_SHIFT 21 #define DMA_DEST_DRQ_TYPE (0x1f << 16) #define DMA_DEST_DRQ_TYPE_SHIFT 16 #define DMA_SRC_DATA_WIDTH (0x3 << 9) #define DMA_SRC_DATA_WIDTH_SHIFT 9 #define DMA_SRC_BST_LEN (0x3 << 6) #define DMA_SRC_BST_LEN_SHIFT 6 #define 
DMA_SRC_ADDR_MODE (0x1 << 5) #define DMA_SRC_ADDR_MODE_SHIFT 5 #define DMA_SRC_DRQ_TYPE (0x1f << 0) #define DMA_SRC_DRQ_TYPE_SHIFT 0 #define DMA_DATA_WIDTH_8BIT 0 #define DMA_DATA_WIDTH_16BIT 1 #define DMA_DATA_WIDTH_32BIT 2 #define DMA_DATA_WIDTH_64BIT 3 #define DMA_ADDR_MODE_LINEAR 0 #define DMA_ADDR_MODE_IO 1 #define DMA_BST_LEN_1 0 #define DMA_BST_LEN_4 1 #define DMA_BST_LEN_8 2 #define DMA_BST_LEN_16 3 #define DMA_CUR_SRC_REG(n) (0x100 + (n) * 0x40 + 0x10) #define DMA_CUR_DEST_REG(n) (0x100 + (n) * 0x40 + 0x14) #define DMA_BCNT_LEFT_REG(n) (0x100 + (n) * 0x40 + 0x18) #define DMA_PARA_REG(n) (0x100 + (n) * 0x40 + 0x1c) #define WAIT_CYC (0xff << 0) #define WAIT_CYC_SHIFT 0 struct a31dmac_desc { uint32_t config; uint32_t srcaddr; uint32_t dstaddr; uint32_t bcnt; uint32_t para; uint32_t next; #define DMA_NULL 0xfffff800 }; #define DESC_ALIGN 4 #define DESC_SIZE sizeof(struct a31dmac_desc) struct a31dmac_config { u_int nchans; }; static const struct a31dmac_config a31_config = { .nchans = 16 }; static const struct a31dmac_config h3_config = { .nchans = 12 }; static const struct a31dmac_config a83t_config = { .nchans = 8 }; static const struct a31dmac_config a64_config = { .nchans = 8 }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun6i-a31-dma", (uintptr_t)&a31_config }, { "allwinner,sun8i-a83t-dma", (uintptr_t)&a83t_config }, { "allwinner,sun8i-h3-dma", (uintptr_t)&h3_config }, { "allwinner,sun50i-a64-dma", (uintptr_t)&a64_config }, { NULL, (uintptr_t)NULL } }; struct a31dmac_softc; struct a31dmac_channel { struct a31dmac_softc * sc; uint8_t index; void (*callback)(void *); void * callbackarg; bus_dmamap_t dmamap; struct a31dmac_desc *desc; bus_addr_t physaddr; }; struct a31dmac_softc { struct resource * res[2]; struct mtx mtx; void * ih; bus_dma_tag_t dmat; u_int nchans; struct a31dmac_channel * chans; }; static struct resource_spec a31dmac_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define 
DMA_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define DMA_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static void a31dmac_intr(void *); static void a31dmac_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int a31dmac_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner DMA controller"); return (BUS_PROBE_DEFAULT); } static int a31dmac_attach(device_t dev) { struct a31dmac_softc *sc; struct a31dmac_config *conf; u_int index; hwreset_t rst; clk_t clk; int error; sc = device_get_softc(dev); conf = (void *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; clk = NULL; rst = NULL; if (bus_alloc_resources(dev, a31dmac_spec, sc->res)) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->mtx, "a31 dmac", NULL, MTX_SPIN); /* Clock and reset setup */ if (clk_get_by_ofw_index(dev, 0, 0, &clk) != 0) { device_printf(dev, "cannot get clock\n"); goto fail; } if (clk_enable(clk) != 0) { device_printf(dev, "cannot enable clock\n"); goto fail; } if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) != 0) { device_printf(dev, "cannot get hwreset\n"); goto fail; } if (hwreset_deassert(rst) != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } /* Descriptor DMA */ error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ DESC_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DESC_SIZE, 1, /* maxsize, nsegs */ DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) { device_printf(dev, "cannot create dma tag\n"); goto fail; } /* Disable all interrupts and clear pending status */ DMA_WRITE(sc, DMA_IRQ_EN_REG0, 0); DMA_WRITE(sc, DMA_IRQ_EN_REG1, 0); DMA_WRITE(sc, DMA_IRQ_PEND_REG0, ~0); DMA_WRITE(sc, DMA_IRQ_PEND_REG1, ~0); /* 
Initialize channels */ sc->nchans = conf->nchans; sc->chans = malloc(sizeof(*sc->chans) * sc->nchans, M_DEVBUF, M_WAITOK | M_ZERO); for (index = 0; index < sc->nchans; index++) { sc->chans[index].sc = sc; sc->chans[index].index = index; sc->chans[index].callback = NULL; sc->chans[index].callbackarg = NULL; error = bus_dmamem_alloc(sc->dmat, (void **)&sc->chans[index].desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->chans[index].dmamap); if (error != 0) { device_printf(dev, "cannot allocate dma mem\n"); goto fail; } error = bus_dmamap_load(sc->dmat, sc->chans[index].dmamap, sc->chans[index].desc, sizeof(*sc->chans[index].desc), a31dmac_dmamap_cb, &sc->chans[index], BUS_DMA_WAITOK); if (error != 0) { device_printf(dev, "cannot load dma map\n"); goto fail; } DMA_WRITE(sc, DMA_EN_REG(index), 0); } error = bus_setup_intr(dev, sc->res[1], INTR_MPSAFE | INTR_TYPE_MISC, NULL, a31dmac_intr, sc, &sc->ih); if (error != 0) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, a31dmac_spec, sc->res); mtx_destroy(&sc->mtx); return (ENXIO); } OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev); return (0); fail: for (index = 0; index < sc->nchans; index++) if (sc->chans[index].desc != NULL) { bus_dmamap_unload(sc->dmat, sc->chans[index].dmamap); bus_dmamem_free(sc->dmat, sc->chans[index].desc, sc->chans[index].dmamap); } if (sc->chans != NULL) free(sc->chans, M_DEVBUF); if (sc->ih != NULL) bus_teardown_intr(dev, sc->res[1], sc->ih); if (rst != NULL) hwreset_release(rst); if (clk != NULL) clk_release(clk); bus_release_resources(dev, a31dmac_spec, sc->res); return (ENXIO); } static void a31dmac_dmamap_cb(void *priv, bus_dma_segment_t *segs, int nsegs, int error) { struct a31dmac_channel *ch; if (error != 0) return; ch = priv; ch->physaddr = segs[0].ds_addr; } static void a31dmac_intr(void *priv) { struct a31dmac_softc *sc; uint32_t pend0, pend1, bit; uint64_t pend, mask; u_int index; sc = priv; pend0 = DMA_READ(sc, 
DMA_IRQ_PEND_REG0); pend1 = sc->nchans > 8 ? DMA_READ(sc, DMA_IRQ_PEND_REG1) : 0; if (pend0 == 0 && pend1 == 0) return; if (pend0 != 0) DMA_WRITE(sc, DMA_IRQ_PEND_REG0, pend0); if (pend1 != 0) DMA_WRITE(sc, DMA_IRQ_PEND_REG1, pend1); pend = pend0 | ((uint64_t)pend1 << 32); while ((bit = ffsll(pend & DMA_PKG_IRQ_MASK)) != 0) { mask = (1U << (bit - 1)); pend &= ~mask; index = (bit - 1) / 4; if (index >= sc->nchans) continue; if (sc->chans[index].callback == NULL) continue; sc->chans[index].callback(sc->chans[index].callbackarg); } } static int a31dmac_set_config(device_t dev, void *priv, const struct sunxi_dma_config *cfg) { struct a31dmac_channel *ch; uint32_t config, para; unsigned int dst_dw, dst_bl, dst_wc, dst_am; unsigned int src_dw, src_bl, src_wc, src_am; ch = priv; switch (cfg->dst_width) { case 8: dst_dw = DMA_DATA_WIDTH_8BIT; break; case 16: dst_dw = DMA_DATA_WIDTH_16BIT; break; case 32: dst_dw = DMA_DATA_WIDTH_32BIT; break; case 64: dst_dw = DMA_DATA_WIDTH_64BIT; break; default: return (EINVAL); } switch (cfg->dst_burst_len) { case 1: dst_bl = DMA_BST_LEN_1; break; case 4: dst_bl = DMA_BST_LEN_4; break; case 8: dst_bl = DMA_BST_LEN_8; break; case 16: dst_bl = DMA_BST_LEN_16; break; default: return (EINVAL); } switch (cfg->src_width) { case 8: src_dw = DMA_DATA_WIDTH_8BIT; break; case 16: src_dw = DMA_DATA_WIDTH_16BIT; break; case 32: src_dw = DMA_DATA_WIDTH_32BIT; break; case 64: src_dw = DMA_DATA_WIDTH_64BIT; default: return (EINVAL); } switch (cfg->src_burst_len) { case 1: src_bl = DMA_BST_LEN_1; break; case 4: src_bl = DMA_BST_LEN_4; break; case 8: src_bl = DMA_BST_LEN_8; break; case 16: src_bl = DMA_BST_LEN_16; break; default: return (EINVAL); } dst_am = cfg->dst_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR; src_am = cfg->src_noincr ? 
DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR; dst_wc = cfg->dst_wait_cyc; src_wc = cfg->src_wait_cyc; if (dst_wc != src_wc) return (EINVAL); config = (dst_dw << DMA_DEST_DATA_WIDTH_SHIFT) | (dst_bl << DMA_DEST_BST_LEN_SHIFT) | (dst_am << DMA_DEST_ADDR_MODE_SHIFT) | (cfg->dst_drqtype << DMA_DEST_DRQ_TYPE_SHIFT) | (src_dw << DMA_SRC_DATA_WIDTH_SHIFT) | (src_bl << DMA_SRC_BST_LEN_SHIFT) | (src_am << DMA_SRC_ADDR_MODE_SHIFT) | (cfg->src_drqtype << DMA_SRC_DRQ_TYPE_SHIFT); para = (dst_wc << WAIT_CYC_SHIFT); ch->desc->config = htole32(config); ch->desc->para = htole32(para); return (0); } static void * a31dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg) { struct a31dmac_softc *sc; struct a31dmac_channel *ch; uint32_t irqen; u_int index; sc = device_get_softc(dev); ch = NULL; mtx_lock_spin(&sc->mtx); for (index = 0; index < sc->nchans; index++) { if (sc->chans[index].callback == NULL) { ch = &sc->chans[index]; ch->callback = cb; ch->callbackarg = cbarg; irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index)); irqen |= DMA_PKG_IRQ_EN(index); DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen); break; } } mtx_unlock_spin(&sc->mtx); return (ch); } static void a31dmac_free(device_t dev, void *priv) { struct a31dmac_channel *ch; struct a31dmac_softc *sc; uint32_t irqen; u_int index; ch = priv; sc = ch->sc; index = ch->index; mtx_lock_spin(&sc->mtx); irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index)); irqen &= ~DMA_PKG_IRQ_EN(index); DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen); DMA_WRITE(sc, DMA_IRQ_PEND_REG(index), DMA_PKG_IRQ_EN(index)); ch->callback = NULL; ch->callbackarg = NULL; mtx_unlock_spin(&sc->mtx); } static int a31dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst, size_t nbytes) { struct a31dmac_channel *ch; struct a31dmac_softc *sc; ch = priv; sc = ch->sc; ch->desc->srcaddr = htole32((uint32_t)src); ch->desc->dstaddr = htole32((uint32_t)dst); ch->desc->bcnt = htole32(nbytes); ch->desc->next = htole32(DMA_NULL); DMA_WRITE(sc, 
DMA_STAR_ADDR_REG(ch->index), (uint32_t)ch->physaddr); DMA_WRITE(sc, DMA_EN_REG(ch->index), DMA_EN); return (0); } static void a31dmac_halt(device_t dev, void *priv) { struct a31dmac_channel *ch; struct a31dmac_softc *sc; ch = priv; sc = ch->sc; DMA_WRITE(sc, DMA_EN_REG(ch->index), 0); } static device_method_t a31dmac_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a31dmac_probe), DEVMETHOD(device_attach, a31dmac_attach), /* sunxi DMA interface */ DEVMETHOD(sunxi_dma_alloc, a31dmac_alloc), DEVMETHOD(sunxi_dma_free, a31dmac_free), DEVMETHOD(sunxi_dma_set_config, a31dmac_set_config), DEVMETHOD(sunxi_dma_transfer, a31dmac_transfer), DEVMETHOD(sunxi_dma_halt, a31dmac_halt), DEVMETHOD_END }; static driver_t a31dmac_driver = { "a31dmac", a31dmac_methods, sizeof(struct a31dmac_softc) }; DRIVER_MODULE(a31dmac, simplebus, a31dmac_driver, 0, 0); diff --git a/sys/arm/allwinner/a33_codec.c b/sys/arm/allwinner/a33_codec.c index a0d455676430..d71562b08c9b 100644 --- a/sys/arm/allwinner/a33_codec.c +++ b/sys/arm/allwinner/a33_codec.c @@ -1,407 +1,407 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * Copyright (c) 2018 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "opt_snd.h" #include #include #include "audio_dai_if.h" #define SYSCLK_CTL 0x00c #define AIF1CLK_ENA (1 << 11) #define AIF1CLK_SRC_MASK (3 << 8) #define AIF1CLK_SRC_PLL (2 << 8) #define SYSCLK_ENA (1 << 3) #define SYSCLK_SRC (1 << 0) #define MOD_CLK_ENA 0x010 #define MOD_RST_CTL 0x014 #define MOD_AIF1 (1 << 15) #define MOD_ADC (1 << 3) #define MOD_DAC (1 << 2) #define SYS_SR_CTRL 0x018 #define AIF1_FS_MASK (0xf << 12) #define AIF_FS_48KHZ (8 << 12) #define AIF1CLK_CTRL 0x040 #define AIF1_MSTR_MOD (1 << 15) #define AIF1_BCLK_INV (1 << 14) #define AIF1_LRCK_INV (1 << 13) #define AIF1_BCLK_DIV_MASK (0xf << 9) #define AIF1_BCLK_DIV_16 (6 << 9) #define AIF1_LRCK_DIV_MASK (7 << 6) #define AIF1_LRCK_DIV_16 (0 << 6) #define AIF1_LRCK_DIV_64 (2 << 6) #define AIF1_WORD_SIZ_MASK (3 << 4) #define AIF1_WORD_SIZ_16 (1 << 4) #define AIF1_DATA_FMT_MASK (3 << 2) #define AIF1_DATA_FMT_I2S (0 << 2) #define AIF1_DATA_FMT_LJ (1 << 2) #define AIF1_DATA_FMT_RJ (2 << 2) #define AIF1_DATA_FMT_DSP (3 << 2) #define AIF1_ADCDAT_CTRL 0x044 #define AIF1_ADC0L_ENA (1 << 15) #define AIF1_ADC0R_ENA (1 << 14) #define AIF1_DACDAT_CTRL 0x048 #define AIF1_DAC0L_ENA (1 << 15) #define AIF1_DAC0R_ENA (1 << 14) #define AIF1_MXR_SRC 0x04c #define AIF1L_MXR_SRC_MASK (0xf << 12) #define AIF1L_MXR_SRC_AIF1 (0x8 << 12) #define AIF1L_MXR_SRC_ADC (0x2 
<< 12) #define AIF1R_MXR_SRC_MASK (0xf << 8) #define AIF1R_MXR_SRC_AIF1 (0x8 << 8) #define AIF1R_MXR_SRC_ADC (0x2 << 8) #define ADC_DIG_CTRL 0x100 #define ADC_DIG_CTRL_ENAD (1 << 15) #define HMIC_CTRL1 0x110 #define HMIC_CTRL1_N_MASK (0xf << 8) #define HMIC_CTRL1_N(n) (((n) & 0xf) << 8) #define HMIC_CTRL1_JACK_IN_IRQ_EN (1 << 4) #define HMIC_CTRL1_JACK_OUT_IRQ_EN (1 << 3) #define HMIC_CTRL1_MIC_DET_IRQ_EN (1 << 0) #define HMIC_CTRL2 0x114 #define HMIC_CTRL2_MDATA_THRES __BITS(12,8) #define HMIC_STS 0x118 #define HMIC_STS_MIC_PRESENT (1 << 6) #define HMIC_STS_JACK_DET_OIRQ (1 << 4) #define HMIC_STS_JACK_DET_IIRQ (1 << 3) #define HMIC_STS_MIC_DET_ST (1 << 0) #define DAC_DIG_CTRL 0x120 #define DAC_DIG_CTRL_ENDA (1 << 15) #define DAC_MXR_SRC 0x130 #define DACL_MXR_SRC_MASK (0xf << 12) #define DACL_MXR_SRC_AIF1_DAC0L (0x8 << 12) #define DACR_MXR_SRC_MASK (0xf << 8) #define DACR_MXR_SRC_AIF1_DAC0R (0x8 << 8) static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-a33-codec", 1}, { NULL, 0 } }; static struct resource_spec sun8i_codec_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct sun8i_codec_softc { device_t dev; struct resource *res[2]; struct mtx mtx; clk_t clk_gate; clk_t clk_mod; void * intrhand; }; #define CODEC_LOCK(sc) mtx_lock(&(sc)->mtx) #define CODEC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define CODEC_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define CODEC_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int sun8i_codec_probe(device_t dev); static int sun8i_codec_attach(device_t dev); static int sun8i_codec_detach(device_t dev); static int sun8i_codec_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner Codec"); return (BUS_PROBE_DEFAULT); } static int sun8i_codec_attach(device_t dev) { struct sun8i_codec_softc *sc; int error; uint32_t val; struct 
gpiobus_pin *pa_pin; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, sun8i_codec_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } error = clk_get_by_ofw_name(dev, 0, "mod", &sc->clk_mod); if (error != 0) { device_printf(dev, "cannot get \"mod\" clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "bus", &sc->clk_gate); if (error != 0) { device_printf(dev, "cannot get \"bus\" clock\n"); goto fail; } error = clk_enable(sc->clk_gate); if (error != 0) { device_printf(dev, "cannot enable \"bus\" clock\n"); goto fail; } /* Enable clocks */ val = CODEC_READ(sc, SYSCLK_CTL); val |= AIF1CLK_ENA; val &= ~AIF1CLK_SRC_MASK; val |= AIF1CLK_SRC_PLL; val |= SYSCLK_ENA; val &= ~SYSCLK_SRC; CODEC_WRITE(sc, SYSCLK_CTL, val); CODEC_WRITE(sc, MOD_CLK_ENA, MOD_AIF1 | MOD_ADC | MOD_DAC); CODEC_WRITE(sc, MOD_RST_CTL, MOD_AIF1 | MOD_ADC | MOD_DAC); /* Enable digital parts */ CODEC_WRITE(sc, DAC_DIG_CTRL, DAC_DIG_CTRL_ENDA); CODEC_WRITE(sc, ADC_DIG_CTRL, ADC_DIG_CTRL_ENAD); /* Set AIF1 to 48 kHz */ val = CODEC_READ(sc, SYS_SR_CTRL); val &= ~AIF1_FS_MASK; val |= AIF_FS_48KHZ; CODEC_WRITE(sc, SYS_SR_CTRL, val); /* Set AIF1 to 16-bit */ val = CODEC_READ(sc, AIF1CLK_CTRL); val &= ~AIF1_WORD_SIZ_MASK; val |= AIF1_WORD_SIZ_16; CODEC_WRITE(sc, AIF1CLK_CTRL, val); /* Enable AIF1 DAC timelot 0 */ val = CODEC_READ(sc, AIF1_DACDAT_CTRL); val |= AIF1_DAC0L_ENA; val |= AIF1_DAC0R_ENA; CODEC_WRITE(sc, AIF1_DACDAT_CTRL, val); /* Enable AIF1 ADC timelot 0 */ val = CODEC_READ(sc, AIF1_ADCDAT_CTRL); val |= AIF1_ADC0L_ENA; val |= AIF1_ADC0R_ENA; CODEC_WRITE(sc, AIF1_ADCDAT_CTRL, val); /* DAC mixer source select */ val = CODEC_READ(sc, DAC_MXR_SRC); val &= ~DACL_MXR_SRC_MASK; val |= DACL_MXR_SRC_AIF1_DAC0L; val &= ~DACR_MXR_SRC_MASK; val |= DACR_MXR_SRC_AIF1_DAC0R; CODEC_WRITE(sc, DAC_MXR_SRC, val); /* ADC 
mixer source select */ val = CODEC_READ(sc, AIF1_MXR_SRC); val &= ~AIF1L_MXR_SRC_MASK; val |= AIF1L_MXR_SRC_ADC; val &= ~AIF1R_MXR_SRC_MASK; val |= AIF1R_MXR_SRC_ADC; CODEC_WRITE(sc, AIF1_MXR_SRC, val); /* Enable PA power */ /* Unmute PA */ if (gpio_pin_get_by_ofw_property(dev, node, "allwinner,pa-gpios", &pa_pin) == 0) { error = gpio_pin_set_active(pa_pin, 1); if (error != 0) device_printf(dev, "failed to unmute PA\n"); } OF_device_register_xref(OF_xref_from_node(node), dev); return (0); fail: sun8i_codec_detach(dev); return (error); } static int sun8i_codec_detach(device_t dev) { struct sun8i_codec_softc *sc; sc = device_get_softc(dev); if (sc->clk_gate) clk_release(sc->clk_gate); if (sc->clk_mod) clk_release(sc->clk_mod); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, sun8i_codec_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static int sun8i_codec_dai_init(device_t dev, uint32_t format) { struct sun8i_codec_softc *sc; int fmt, pol, clk; uint32_t val; sc = device_get_softc(dev); fmt = AUDIO_DAI_FORMAT_FORMAT(format); pol = AUDIO_DAI_FORMAT_POLARITY(format); clk = AUDIO_DAI_FORMAT_CLOCK(format); val = CODEC_READ(sc, AIF1CLK_CTRL); val &= ~AIF1_DATA_FMT_MASK; switch (fmt) { case AUDIO_DAI_FORMAT_I2S: val |= AIF1_DATA_FMT_I2S; break; case AUDIO_DAI_FORMAT_RJ: val |= AIF1_DATA_FMT_RJ; break; case AUDIO_DAI_FORMAT_LJ: val |= AIF1_DATA_FMT_LJ; break; case AUDIO_DAI_FORMAT_DSPA: case AUDIO_DAI_FORMAT_DSPB: val |= AIF1_DATA_FMT_DSP; break; default: return EINVAL; } val &= ~(AIF1_BCLK_INV|AIF1_LRCK_INV); /* Codec LRCK polarity is inverted (datasheet is wrong) */ if (!AUDIO_DAI_POLARITY_INVERTED_FRAME(pol)) val |= AIF1_LRCK_INV; if (AUDIO_DAI_POLARITY_INVERTED_BCLK(pol)) val |= AIF1_BCLK_INV; switch (clk) { case AUDIO_DAI_CLOCK_CBM_CFM: val &= ~AIF1_MSTR_MOD; /* codec is master */ break; case AUDIO_DAI_CLOCK_CBS_CFS: val |= AIF1_MSTR_MOD; /* codec is slave */ break; default: return EINVAL; } val &= 
~AIF1_LRCK_DIV_MASK; val |= AIF1_LRCK_DIV_64; val &= ~AIF1_BCLK_DIV_MASK; val |= AIF1_BCLK_DIV_16; CODEC_WRITE(sc, AIF1CLK_CTRL, val); return (0); } static int sun8i_codec_dai_trigger(device_t dev, int go, int pcm_dir) { return (0); } static int sun8i_codec_dai_setup_mixer(device_t dev, device_t pcmdev) { /* Do nothing for now */ return (0); } static device_method_t sun8i_codec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sun8i_codec_probe), DEVMETHOD(device_attach, sun8i_codec_attach), DEVMETHOD(device_detach, sun8i_codec_detach), DEVMETHOD(audio_dai_init, sun8i_codec_dai_init), DEVMETHOD(audio_dai_setup_mixer, sun8i_codec_dai_setup_mixer), DEVMETHOD(audio_dai_trigger, sun8i_codec_dai_trigger), DEVMETHOD_END }; static driver_t sun8i_codec_driver = { "sun8icodec", sun8i_codec_methods, sizeof(struct sun8i_codec_softc), }; DRIVER_MODULE(sun8i_codec, simplebus, sun8i_codec_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/a64/sun50i_a64_acodec.c b/sys/arm/allwinner/a64/sun50i_a64_acodec.c index b5ab13efac41..f99d5385d897 100644 --- a/sys/arm/allwinner/a64/sun50i_a64_acodec.c +++ b/sys/arm/allwinner/a64/sun50i_a64_acodec.c @@ -1,481 +1,481 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * Copyright (c) 2018 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "syscon_if.h" #include "opt_snd.h" #include #include #include "audio_dai_if.h" #include "mixer_if.h" #define A64_PR_CFG 0x00 #define A64_AC_PR_RST (1 << 28) #define A64_AC_PR_RW (1 << 24) #define A64_AC_PR_ADDR_MASK (0x1f << 16) #define A64_AC_PR_ADDR(n) (((n) & 0x1f) << 16) #define A64_ACDA_PR_WDAT_MASK (0xff << 8) #define A64_ACDA_PR_WDAT(n) (((n) & 0xff) << 8) #define A64_ACDA_PR_RDAT(n) ((n) & 0xff) #define A64_HP_CTRL 0x00 #define A64_HPPA_EN (1 << 6) #define A64_HPVOL_MASK 0x3f #define A64_HPVOL(n) ((n) & 0x3f) #define A64_OL_MIX_CTRL 0x01 #define A64_LMIXMUTE_LDAC (1 << 1) #define A64_OR_MIX_CTRL 0x02 #define A64_RMIXMUTE_RDAC (1 << 1) #define A64_LINEOUT_CTRL0 0x05 #define A64_LINEOUT_LEFT_EN (1 << 7) #define A64_LINEOUT_RIGHT_EN (1 << 6) #define A64_LINEOUT_EN (A64_LINEOUT_LEFT_EN|A64_LINEOUT_RIGHT_EN) #define A64_LINEOUT_CTRL1 0x06 #define A64_LINEOUT_VOL __BITS(4,0) #define A64_MIC1_CTRL 0x07 #define A64_MIC1G __BITS(6,4) #define A64_MIC1AMPEN (1 << 3) #define A64_MIC1BOOST __BITS(2,0) #define A64_MIC2_CTRL 0x08 #define A64_MIC2_SEL (1 << 7) #define A64_MIC2G_MASK (7 << 4) #define 
A64_MIC2G(n) (((n) & 7) << 4) #define A64_MIC2AMPEN (1 << 3) #define A64_MIC2BOOST_MASK (7 << 0) #define A64_MIC2BOOST(n) (((n) & 7) << 0) #define A64_LINEIN_CTRL 0x09 #define A64_LINEING __BITS(6,4) #define A64_MIX_DAC_CTRL 0x0a #define A64_DACAREN (1 << 7) #define A64_DACALEN (1 << 6) #define A64_RMIXEN (1 << 5) #define A64_LMIXEN (1 << 4) #define A64_RHPPAMUTE (1 << 3) #define A64_LHPPAMUTE (1 << 2) #define A64_RHPIS (1 << 1) #define A64_LHPIS (1 << 0) #define A64_L_ADCMIX_SRC 0x0b #define A64_R_ADCMIX_SRC 0x0c #define A64_ADCMIX_SRC_MIC1 (1 << 6) #define A64_ADCMIX_SRC_MIC2 (1 << 5) #define A64_ADCMIX_SRC_LINEIN (1 << 2) #define A64_ADCMIX_SRC_OMIXER (1 << 1) #define A64_ADC_CTRL 0x0d #define A64_ADCREN (1 << 7) #define A64_ADCLEN (1 << 6) #define A64_ADCG __BITS(2,0) #define A64_JACK_MIC_CTRL 0x1d #define A64_JACKDETEN (1 << 7) #define A64_INNERRESEN (1 << 6) #define A64_HMICBIASEN (1 << 5) #define A64_AUTOPLEN (1 << 1) #define A64CODEC_MIXER_DEVS ((1 << SOUND_MIXER_VOLUME) | \ (1 << SOUND_MIXER_MIC)) static struct ofw_compat_data compat_data[] = { { "allwinner,sun50i-a64-codec-analog", 1}, { NULL, 0 } }; struct a64codec_softc { device_t dev; struct resource *res; struct mtx mtx; u_int regaddr; /* address for the sysctl */ }; #define A64CODEC_LOCK(sc) mtx_lock(&(sc)->mtx) #define A64CODEC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define A64CODEC_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define A64CODEC_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int a64codec_probe(device_t dev); static int a64codec_attach(device_t dev); static int a64codec_detach(device_t dev); static u_int a64_acodec_pr_read(struct a64codec_softc *sc, u_int addr) { uint32_t val; /* Read current value */ val = A64CODEC_READ(sc, A64_PR_CFG); /* De-assert reset */ val |= A64_AC_PR_RST; A64CODEC_WRITE(sc, A64_PR_CFG, val); /* Read mode */ val &= ~A64_AC_PR_RW; A64CODEC_WRITE(sc, A64_PR_CFG, val); /* Set address */ val &= ~A64_AC_PR_ADDR_MASK; val |= A64_AC_PR_ADDR(addr); 
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
	/* Read data */
	val = A64CODEC_READ(sc, A64_PR_CFG);
	return A64_ACDA_PR_RDAT(val);
}

/*
 * Write one byte to an analog-codec register through the PR_CFG
 * indirect-access window: de-assert reset, latch the register address,
 * latch the data byte, then pulse the R/W bit to commit the write.
 */
static void
a64_acodec_pr_write(struct a64codec_softc *sc, u_int addr, u_int data)
{
	uint32_t val;

	/* Read current value */
	val = A64CODEC_READ(sc, A64_PR_CFG);
	/* De-assert reset */
	val |= A64_AC_PR_RST;
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
	/* Set address */
	val &= ~A64_AC_PR_ADDR_MASK;
	val |= A64_AC_PR_ADDR(addr);
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
	/* Write data */
	val &= ~A64_ACDA_PR_WDAT_MASK;
	val |= A64_ACDA_PR_WDAT(data);
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
	/* Write mode */
	val |= A64_AC_PR_RW;
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
	/* Clear write mode */
	val &= ~A64_AC_PR_RW;
	A64CODEC_WRITE(sc, A64_PR_CFG, val);
}

/*
 * Read-modify-write helper for the indirect registers: bits in 'set'
 * are asserted, bits in 'clr' are cleared ('set' wins on overlap).
 * Not atomic — callers serialize via attach-time ordering or the
 * softc mutex.
 */
static void
a64_acodec_pr_set_clear(struct a64codec_softc *sc, u_int addr, u_int set, u_int clr)
{
	u_int old, new;

	old = a64_acodec_pr_read(sc, addr);
	new = set | (old & ~clr);
	a64_acodec_pr_write(sc, addr, new);
}

/* Match against the compat_data table and claim the device. */
static int
a64codec_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Allwinner A64 Analog Codec");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: map registers, optionally enable the cpvdd regulator, then
 * bring up a fixed analog routing — headphone output fed by the DAC
 * mixers (line-out disabled) and MIC2 as the capture source — and
 * register the node xref so the DAI framework can find us.
 */
static int
a64codec_attach(device_t dev)
{
	struct a64codec_softc *sc;
	int error, rid;
	phandle_t node;
	regulator_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->res) {
		device_printf(dev, "cannot allocate resource for device\n");
		error = ENXIO;
		goto fail;
	}

	/* Regulator is optional in the DT; enable it only if present. */
	if (regulator_get_by_ofw_property(dev, 0, "cpvdd-supply", &reg) == 0) {
		error = regulator_enable(reg);
		if (error != 0) {
			device_printf(dev, "cannot enable PHY regulator\n");
			goto fail;
		}
	}

	/* Right & Left Headphone PA enable */
	a64_acodec_pr_set_clear(sc, A64_HP_CTRL, A64_HPPA_EN, 0);

	/* Microphone BIAS enable */
	a64_acodec_pr_set_clear(sc, A64_JACK_MIC_CTRL, A64_HMICBIASEN |
	    A64_INNERRESEN, 0);

	/* Unmute DAC to output mixer */
	a64_acodec_pr_set_clear(sc, A64_OL_MIX_CTRL, A64_LMIXMUTE_LDAC, 0);
	a64_acodec_pr_set_clear(sc, A64_OR_MIX_CTRL, A64_RMIXMUTE_RDAC, 0);

	/* For now we work only with headphones */
	a64_acodec_pr_set_clear(sc, A64_LINEOUT_CTRL0, 0, A64_LINEOUT_EN);
	a64_acodec_pr_set_clear(sc, A64_HP_CTRL, A64_HPPA_EN, 0);

	/* Initial headphone volume: 0x25 of the 0x3f (A64_HPVOL) range. */
	u_int val = a64_acodec_pr_read(sc, A64_HP_CTRL);
	val &= ~(0x3f);
	val |= 0x25;
	a64_acodec_pr_write(sc, A64_HP_CTRL, val);

	/* MIC2 as capture source: amp on, mid gain, boost 0x4. */
	a64_acodec_pr_set_clear(sc, A64_MIC2_CTRL,
	    A64_MIC2AMPEN | A64_MIC2_SEL | A64_MIC2G(0x3) | A64_MIC2BOOST(0x4),
	    A64_MIC2G_MASK | A64_MIC2BOOST_MASK);
	a64_acodec_pr_write(sc, A64_L_ADCMIX_SRC, A64_ADCMIX_SRC_MIC2);
	a64_acodec_pr_write(sc, A64_R_ADCMIX_SRC, A64_ADCMIX_SRC_MIC2);

	/* Max out MIC2 gain */
	val = a64_acodec_pr_read(sc, A64_MIC2_CTRL);
	val &= ~(0x7);
	val |= (0x7);
	val &= ~(7 << 4);
	val |= (7 << 4);
	a64_acodec_pr_write(sc, A64_MIC2_CTRL, val);

	node = ofw_bus_get_node(dev);
	OF_device_register_xref(OF_xref_from_node(node), dev);

	return (0);

fail:
	a64codec_detach(dev);
	return (error);
}

/* Detach/attach-failure teardown; safe when resources were never set. */
static int
a64codec_detach(device_t dev)
{
	struct a64codec_softc *sc;

	sc = device_get_softc(dev);

	if (sc->res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	mtx_destroy(&sc->mtx);

	return (0);
}

/* Advertise the two mixer controls this codec exposes. */
static int
a64codec_mixer_init(struct snd_mixer *m)
{
	mix_setdevs(m, A64CODEC_MIXER_DEVS);

	return (0);
}

static int
a64codec_mixer_uninit(struct snd_mixer *m)
{
	return (0);
}

static int
a64codec_mixer_reinit(struct snd_mixer *m)
{
	return (0);
}

/*
 * Set VOLUME (headphone, 0..63 steps) or MIC (MIC2 boost, 0..7 steps).
 * The hardware controls are mono, so the left value is applied to both
 * channels and echoed back as right.  The mixer lock may already be
 * held by the caller; do_unlock tracks whether we took it here.
 */
static int
a64codec_mixer_set(struct snd_mixer *m, unsigned dev, unsigned left,
    unsigned right)
{
	struct a64codec_softc *sc;
	struct mtx *mixer_lock;
	uint8_t do_unlock;
	u_int val;

	sc = device_get_softc(mix_getdevinfo(m));
	mixer_lock = mixer_get_lock(m);

	if (mtx_owned(mixer_lock)) {
		do_unlock = 0;
	} else {
		do_unlock = 1;
		mtx_lock(mixer_lock);
	}

	right = left;

	A64CODEC_LOCK(sc);
	switch(dev) {
	case SOUND_MIXER_VOLUME:
		val = a64_acodec_pr_read(sc, A64_HP_CTRL);
		val &= ~(A64_HPVOL_MASK);
		val |= A64_HPVOL(left * 63 / 100);
		a64_acodec_pr_write(sc, A64_HP_CTRL, val);
		break;

	case SOUND_MIXER_MIC:
		val = a64_acodec_pr_read(sc, A64_MIC2_CTRL);
		val &= ~(A64_MIC2BOOST_MASK);
		val |= A64_MIC2BOOST(left * 7 / 100);
		a64_acodec_pr_write(sc, A64_MIC2_CTRL, val);
		break;
	default:
		break;
	}
	A64CODEC_UNLOCK(sc);

	if (do_unlock) {
		mtx_unlock(mixer_lock);
	}

	return (left | (right << 8));
}

static unsigned
a64codec_mixer_setrecsrc(struct snd_mixer *m, unsigned src)
{
	return (0);
}

static kobj_method_t a64codec_mixer_methods[] = {
	KOBJMETHOD(mixer_init, a64codec_mixer_init),
	KOBJMETHOD(mixer_uninit, a64codec_mixer_uninit),
	KOBJMETHOD(mixer_reinit, a64codec_mixer_reinit),
	KOBJMETHOD(mixer_set, a64codec_mixer_set),
	KOBJMETHOD(mixer_setrecsrc, a64codec_mixer_setrecsrc),
	KOBJMETHOD_END
};
MIXER_DECLARE(a64codec_mixer);

/* DAI format is fixed by the attach-time routing; nothing to do. */
static int
a64codec_dai_init(device_t dev, uint32_t format)
{
	return (0);
}

/*
 * Gate the analog paths on PCM start/stop: DAC channels + output mixer
 * + headphone unmute for playback, ADC channels for capture.
 */
static int
a64codec_dai_trigger(device_t dev, int go, int pcm_dir)
{
	struct a64codec_softc *sc = device_get_softc(dev);

	if ((pcm_dir != PCMDIR_PLAY) && (pcm_dir != PCMDIR_REC))
		return (EINVAL);

	switch (go) {
	case PCMTRIG_START:
		if (pcm_dir == PCMDIR_PLAY) {
			/* Enable DAC analog l/r channels, HP PA, and output mixer */
			a64_acodec_pr_set_clear(sc, A64_MIX_DAC_CTRL,
			    A64_DACAREN | A64_DACALEN | A64_RMIXEN | A64_LMIXEN |
			    A64_RHPPAMUTE | A64_LHPPAMUTE, 0);
		}
		else if (pcm_dir == PCMDIR_REC) {
			/* Enable ADC analog l/r channels */
			a64_acodec_pr_set_clear(sc, A64_ADC_CTRL,
			    A64_ADCREN | A64_ADCLEN, 0);
		}
		break;

	case PCMTRIG_STOP:
	case PCMTRIG_ABORT:
		if (pcm_dir == PCMDIR_PLAY) {
			/* Disable DAC analog l/r channels, HP PA, and output mixer */
			a64_acodec_pr_set_clear(sc, A64_MIX_DAC_CTRL, 0,
			    A64_DACAREN | A64_DACALEN | A64_RMIXEN | A64_LMIXEN |
			    A64_RHPPAMUTE | A64_LHPPAMUTE);
		}
		else if (pcm_dir == PCMDIR_REC) {
			/* Disable ADC analog l/r channels */
			a64_acodec_pr_set_clear(sc, A64_ADC_CTRL, 0,
			    A64_ADCREN | A64_ADCLEN);
		}
		break;
	}

	return (0);
}

static int
a64codec_dai_setup_mixer(device_t dev, device_t pcmdev) { mixer_init(pcmdev, &a64codec_mixer_class, dev); return (0); } static device_method_t a64codec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a64codec_probe), DEVMETHOD(device_attach, a64codec_attach), DEVMETHOD(device_detach, a64codec_detach), DEVMETHOD(audio_dai_init, a64codec_dai_init), DEVMETHOD(audio_dai_setup_mixer, a64codec_dai_setup_mixer), DEVMETHOD(audio_dai_trigger, a64codec_dai_trigger), DEVMETHOD_END }; static driver_t a64codec_driver = { "a64codec", a64codec_methods, sizeof(struct a64codec_softc), }; DRIVER_MODULE(a64codec, simplebus, a64codec_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/aw_cir.c b/sys/arm/allwinner/aw_cir.c index bc326e6d6afa..7e9fdfca80bf 100644 --- a/sys/arm/allwinner/aw_cir.c +++ b/sys/arm/allwinner/aw_cir.c @@ -1,558 +1,558 @@ /*- * Copyright (c) 2016 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner Consumer IR controller */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #define READ(_sc, _r) bus_read_4((_sc)->res[0], (_r)) #define WRITE(_sc, _r, _v) bus_write_4((_sc)->res[0], (_r), (_v)) /* IR Control */ #define AW_IR_CTL 0x00 /* Global Enable */ #define AW_IR_CTL_GEN (1 << 0) /* RX enable */ #define AW_IR_CTL_RXEN (1 << 1) /* CIR mode enable */ #define AW_IR_CTL_MD (1 << 4) | (1 << 5) /* RX Config Reg */ #define AW_IR_RXCTL 0x10 /* Pulse Polarity Invert flag */ #define AW_IR_RXCTL_RPPI (1 << 2) /* RX Data */ #define AW_IR_RXFIFO 0x20 /* RX Interrupt Control */ #define AW_IR_RXINT 0x2C /* RX FIFO Overflow */ #define AW_IR_RXINT_ROI_EN (1 << 0) /* RX Packet End */ #define AW_IR_RXINT_RPEI_EN (1 << 1) /* RX FIFO Data Available */ #define AW_IR_RXINT_RAI_EN (1 << 4) /* RX FIFO available byte level */ #define AW_IR_RXINT_RAL(val) ((val) << 8) /* RX Interrupt Status Reg */ #define AW_IR_RXSTA 0x30 /* RX FIFO Get Available Counter */ #define AW_IR_RXSTA_COUNTER(val) (((val) >> 8) & (sc->fifo_size * 2 - 1)) /* Clear all interrupt status */ #define AW_IR_RXSTA_CLEARALL 0xff /* IR Sample Configure Reg */ #define AW_IR_CIR 0x34 /* * Frequency sample: 23437.5Hz (Cycle: 42.7us) * Pulse of NEC Remote > 560us */ /* Filter Threshold = 8 * 42.7 = ~341us < 500us */ #define AW_IR_RXFILT_VAL (((8) & 0x3f) << 2) /* Idle Threshold = (2 + 1) * 128 * 42.7 = 
~16.4ms > 9ms */ #define AW_IR_RXIDLE_VAL (((2) & 0xff) << 8) /* Bit 15 - value (pulse/space) */ #define VAL_MASK 0x80 /* Bits 0:14 - sample duration */ #define PERIOD_MASK 0x7f /* Clock rate for IR0 or IR1 clock in CIR mode */ #define AW_IR_BASE_CLK 3000000 /* Frequency sample 3MHz/64 = 46875Hz (21.3us) */ #define AW_IR_SAMPLE_64 (0 << 0) /* Frequency sample 3MHz/128 = 23437.5Hz (42.7us) */ #define AW_IR_SAMPLE_128 (1 << 0) #define AW_IR_ERROR_CODE 0xffffffff #define AW_IR_REPEAT_CODE 0x0 /* 80 * 42.7 = ~3.4ms, Lead1(4.5ms) > AW_IR_L1_MIN */ #define AW_IR_L1_MIN 80 /* 40 * 42.7 = ~1.7ms, Lead0(4.5ms) Lead0R(2.25ms) > AW_IR_L0_MIN */ #define AW_IR_L0_MIN 40 /* 26 * 42.7 = ~1109us ~= 561 * 2, Pulse < AW_IR_PMAX */ #define AW_IR_PMAX 26 /* 26 * 42.7 = ~1109us ~= 561 * 2, D1 > AW_IR_DMID, D0 <= AW_IR_DMID */ #define AW_IR_DMID 26 /* 53 * 42.7 = ~2263us ~= 561 * 4, D < AW_IR_DMAX */ #define AW_IR_DMAX 53 /* Active Thresholds */ #define AW_IR_ACTIVE_T_VAL AW_IR_L1_MIN #define AW_IR_ACTIVE_T (((AW_IR_ACTIVE_T_VAL - 1) & 0xff) << 16) #define AW_IR_ACTIVE_T_C_VAL 0 #define AW_IR_ACTIVE_T_C ((AW_IR_ACTIVE_T_C_VAL & 0xff) << 23) /* Code masks */ #define CODE_MASK 0x00ff00ff #define INV_CODE_MASK 0xff00ff00 #define VALID_CODE_MASK 0x00ff0000 enum { A10_IR = 1, A13_IR, A31_IR, }; #define AW_IR_RAW_BUF_SIZE 128 SYSCTL_NODE(_hw, OID_AUTO, aw_cir, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "aw_cir driver"); static int aw_cir_debug = 0; SYSCTL_INT(_hw_aw_cir, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_cir_debug, 0, "Debug 1=on 0=off"); struct aw_ir_softc { device_t dev; struct resource *res[2]; void * intrhand; int fifo_size; int dcnt; /* Packet Count */ unsigned char buf[AW_IR_RAW_BUF_SIZE]; struct evdev_dev *sc_evdev; }; static struct resource_spec aw_ir_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun4i-a10-ir", A10_IR }, { "allwinner,sun5i-a13-ir", A13_IR }, { 
"allwinner,sun6i-a31-ir", A31_IR }, { NULL, 0 } }; static void aw_ir_buf_reset(struct aw_ir_softc *sc) { sc->dcnt = 0; } static void aw_ir_buf_write(struct aw_ir_softc *sc, unsigned char data) { if (sc->dcnt < AW_IR_RAW_BUF_SIZE) sc->buf[sc->dcnt++] = data; else if (bootverbose) device_printf(sc->dev, "IR RX Buffer Full!\n"); } static int aw_ir_buf_full(struct aw_ir_softc *sc) { return (sc->dcnt >= AW_IR_RAW_BUF_SIZE); } static unsigned char aw_ir_read_data(struct aw_ir_softc *sc) { return (unsigned char)(READ(sc, AW_IR_RXFIFO) & 0xff); } static unsigned long aw_ir_decode_packets(struct aw_ir_softc *sc) { unsigned int len, code; unsigned int active_delay; unsigned char val, last; int i, bitcount; if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "sc->dcnt = %d\n", sc->dcnt); /* Find Lead 1 (bit separator) */ active_delay = AW_IR_ACTIVE_T_VAL * (AW_IR_ACTIVE_T_C_VAL != 0 ? 128 : 1); len = active_delay; if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "Initial len: %d\n", len); for (i = 0; i < sc->dcnt; i++) { val = sc->buf[i]; if (val & VAL_MASK) len += (val & PERIOD_MASK) + 1; else { if (len > AW_IR_L1_MIN) break; len = 0; } } if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "len = %d\n", len); if ((val & VAL_MASK) || (len <= AW_IR_L1_MIN)) { if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "Bit separator error\n"); goto error_code; } /* Find Lead 0 (bit length) */ len = 0; for (; i < sc->dcnt; i++) { val = sc->buf[i]; if (val & VAL_MASK) { if(len > AW_IR_L0_MIN) break; len = 0; } else len += (val & PERIOD_MASK) + 1; } if ((!(val & VAL_MASK)) || (len <= AW_IR_L0_MIN)) { if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "Bit length error\n"); goto error_code; } /* Start decoding */ code = 0; bitcount = 0; last = 1; len = 0; for (; i < sc->dcnt; i++) { val = sc->buf[i]; if (last) { if (val & VAL_MASK) len += (val & 
PERIOD_MASK) + 1; else { if (len > AW_IR_PMAX) { if (bootverbose) device_printf(sc->dev, "Pulse error, len=%d\n", len); goto error_code; } last = 0; len = (val & PERIOD_MASK) + 1; } } else { if (val & VAL_MASK) { if (len > AW_IR_DMAX) { if (bootverbose) device_printf(sc->dev, "Distance error, len=%d\n", len); goto error_code; } else { if (len > AW_IR_DMID) { /* Decode */ code |= 1 << bitcount; } bitcount++; if (bitcount == 32) break; /* Finish decoding */ } last = 1; len = (val & PERIOD_MASK) + 1; } else len += (val & PERIOD_MASK) + 1; } } return (code); error_code: return (AW_IR_ERROR_CODE); } static int aw_ir_validate_code(unsigned long code) { unsigned long v1, v2; /* Don't check address */ v1 = code & CODE_MASK; v2 = (code & INV_CODE_MASK) >> 8; if (((v1 ^ v2) & VALID_CODE_MASK) == VALID_CODE_MASK) return (0); /* valid */ else return (1); /* invalid */ } static void aw_ir_intr(void *arg) { struct aw_ir_softc *sc; uint32_t val; int i, dcnt; unsigned long ir_code; int stat; sc = (struct aw_ir_softc *)arg; /* Read RX interrupt status */ val = READ(sc, AW_IR_RXSTA); if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "RX interrupt status: %x\n", val); /* Clean all pending interrupt statuses */ WRITE(sc, AW_IR_RXSTA, val | AW_IR_RXSTA_CLEARALL); /* When Rx FIFO Data available or Packet end */ if (val & (AW_IR_RXINT_RAI_EN | AW_IR_RXINT_RPEI_EN)) { if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "RX FIFO Data available or Packet end\n"); /* Get available message count in RX FIFO */ dcnt = AW_IR_RXSTA_COUNTER(val); /* Read FIFO */ for (i = 0; i < dcnt; i++) { if (aw_ir_buf_full(sc)) { if (bootverbose) device_printf(sc->dev, "raw buffer full\n"); break; } else aw_ir_buf_write(sc, aw_ir_read_data(sc)); } } if (val & AW_IR_RXINT_RPEI_EN) { /* RX Packet end */ if (bootverbose && __predict_false(aw_cir_debug) != 0) device_printf(sc->dev, "RX Packet end\n"); ir_code = aw_ir_decode_packets(sc); stat = 
aw_ir_validate_code(ir_code); if (stat == 0) { evdev_push_event(sc->sc_evdev, EV_MSC, MSC_SCAN, ir_code); evdev_sync(sc->sc_evdev); } if (bootverbose && __predict_false(aw_cir_debug) != 0) { device_printf(sc->dev, "Final IR code: %lx\n", ir_code); device_printf(sc->dev, "IR code status: %d\n", stat); } aw_ir_buf_reset(sc); } if (val & AW_IR_RXINT_ROI_EN) { /* RX FIFO overflow */ if (bootverbose) device_printf(sc->dev, "RX FIFO overflow\n"); /* Flush raw buffer */ aw_ir_buf_reset(sc); } } static int aw_ir_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner CIR controller"); return (BUS_PROBE_DEFAULT); } static int aw_ir_attach(device_t dev) { struct aw_ir_softc *sc; hwreset_t rst_apb; clk_t clk_ir, clk_gate; int err; uint32_t val = 0; clk_ir = clk_gate = NULL; rst_apb = NULL; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, aw_ir_spec, sc->res) != 0) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) { case A10_IR: sc->fifo_size = 16; break; case A13_IR: case A31_IR: sc->fifo_size = 64; break; } /* De-assert reset */ if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst_apb) == 0) { err = hwreset_deassert(rst_apb); if (err != 0) { device_printf(dev, "cannot de-assert reset\n"); goto error; } } /* Reset buffer */ aw_ir_buf_reset(sc); /* Get clocks and enable them */ err = clk_get_by_ofw_name(dev, 0, "apb", &clk_gate); if (err != 0) { device_printf(dev, "Cannot get gate clock\n"); goto error; } err = clk_get_by_ofw_name(dev, 0, "ir", &clk_ir); if (err != 0) { device_printf(dev, "Cannot get IR clock\n"); goto error; } /* Set clock rate */ err = clk_set_freq(clk_ir, AW_IR_BASE_CLK, 0); if (err != 0) { device_printf(dev, "cannot set IR clock rate\n"); goto error; } /* Enable clocks */ err = clk_enable(clk_gate); if (err != 0) { 
device_printf(dev, "Cannot enable clk gate\n"); goto error; } err = clk_enable(clk_ir); if (err != 0) { device_printf(dev, "Cannot enable IR clock\n"); goto error; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_ir_intr, sc, &sc->intrhand)) { bus_release_resources(dev, aw_ir_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto error; } /* Enable CIR Mode */ WRITE(sc, AW_IR_CTL, AW_IR_CTL_MD); /* * Set clock sample, filter, idle thresholds. * Frequency sample = 3MHz/128 = 23437.5Hz (42.7us) */ val = AW_IR_SAMPLE_128; val |= (AW_IR_RXFILT_VAL | AW_IR_RXIDLE_VAL); val |= (AW_IR_ACTIVE_T | AW_IR_ACTIVE_T_C); WRITE(sc, AW_IR_CIR, val); /* Invert Input Signal */ WRITE(sc, AW_IR_RXCTL, AW_IR_RXCTL_RPPI); /* Clear All RX Interrupt Status */ WRITE(sc, AW_IR_RXSTA, AW_IR_RXSTA_CLEARALL); /* * Enable RX interrupt in case of overflow, packet end * and FIFO available. * RX FIFO Threshold = FIFO size / 2 */ WRITE(sc, AW_IR_RXINT, AW_IR_RXINT_ROI_EN | AW_IR_RXINT_RPEI_EN | AW_IR_RXINT_RAI_EN | AW_IR_RXINT_RAL((sc->fifo_size >> 1) - 1)); /* Enable IR Module */ val = READ(sc, AW_IR_CTL); WRITE(sc, AW_IR_CTL, val | AW_IR_CTL_GEN | AW_IR_CTL_RXEN); sc->sc_evdev = evdev_alloc(); evdev_set_name(sc->sc_evdev, device_get_desc(sc->dev)); evdev_set_phys(sc->sc_evdev, device_get_nameunit(sc->dev)); evdev_set_id(sc->sc_evdev, BUS_HOST, 0, 0, 0); evdev_support_event(sc->sc_evdev, EV_SYN); evdev_support_event(sc->sc_evdev, EV_MSC); evdev_support_msc(sc->sc_evdev, MSC_SCAN); err = evdev_register(sc->sc_evdev); if (err) { device_printf(dev, "failed to register evdev: error=%d\n", err); goto error; } return (0); error: if (clk_gate != NULL) clk_release(clk_gate); if (clk_ir != NULL) clk_release(clk_ir); if (rst_apb != NULL) hwreset_release(rst_apb); evdev_free(sc->sc_evdev); sc->sc_evdev = NULL; /* Avoid double free */ bus_release_resources(dev, aw_ir_spec, sc->res); return (ENXIO); } static device_method_t aw_ir_methods[] = { 
DEVMETHOD(device_probe, aw_ir_probe), DEVMETHOD(device_attach, aw_ir_attach), DEVMETHOD_END }; static driver_t aw_ir_driver = { "aw_ir", aw_ir_methods, sizeof(struct aw_ir_softc), }; DRIVER_MODULE(aw_ir, simplebus, aw_ir_driver, 0, 0); MODULE_DEPEND(aw_ir, evdev, 1, 1, 1); diff --git a/sys/arm/allwinner/aw_gmacclk.c b/sys/arm/allwinner/aw_gmacclk.c index 46672c8fcb0f..99e69674bd25 100644 --- a/sys/arm/allwinner/aw_gmacclk.c +++ b/sys/arm/allwinner/aw_gmacclk.c @@ -1,268 +1,268 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Allwinner GMAC clock */ #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include "clkdev_if.h" #define GMAC_CLK_PIT (0x1 << 2) #define GMAC_CLK_PIT_SHIFT 2 #define GMAC_CLK_PIT_MII 0 #define GMAC_CLK_PIT_RGMII 1 #define GMAC_CLK_SRC (0x3 << 0) #define GMAC_CLK_SRC_SHIFT 0 #define GMAC_CLK_SRC_MII 0 #define GMAC_CLK_SRC_EXT_RGMII 1 #define GMAC_CLK_SRC_RGMII 2 #define EMAC_TXC_DIV_CFG (1 << 15) #define EMAC_TXC_DIV_CFG_SHIFT 15 #define EMAC_TXC_DIV_CFG_125MHZ 0 #define EMAC_TXC_DIV_CFG_25MHZ 1 #define EMAC_PHY_SELECT (1 << 16) #define EMAC_PHY_SELECT_SHIFT 16 #define EMAC_PHY_SELECT_INT 0 #define EMAC_PHY_SELECT_EXT 1 #define EMAC_ETXDC (0x7 << 10) #define EMAC_ETXDC_SHIFT 10 #define EMAC_ERXDC (0x1f << 5) #define EMAC_ERXDC_SHIFT 5 #define CLK_IDX_MII 0 #define CLK_IDX_RGMII 1 #define CLK_IDX_COUNT 2 static struct ofw_compat_data compat_data[] = { { "allwinner,sun7i-a20-gmac-clk", 1 }, { NULL, 0 } }; struct aw_gmacclk_sc { device_t clkdev; bus_addr_t reg; int rx_delay; int tx_delay; }; #define GMACCLK_READ(sc, val) CLKDEV_READ_4((sc)->clkdev, (sc)->reg, (val)) #define GMACCLK_WRITE(sc, val) CLKDEV_WRITE_4((sc)->clkdev, (sc)->reg, (val)) #define DEVICE_LOCK(sc) CLKDEV_DEVICE_LOCK((sc)->clkdev) #define DEVICE_UNLOCK(sc) CLKDEV_DEVICE_UNLOCK((sc)->clkdev) static int aw_gmacclk_init(struct clknode *clk, device_t dev) { struct aw_gmacclk_sc *sc; uint32_t val, index; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); GMACCLK_READ(sc, &val); DEVICE_UNLOCK(sc); switch ((val & GMAC_CLK_SRC) >> GMAC_CLK_SRC_SHIFT) { case GMAC_CLK_SRC_MII: index = CLK_IDX_MII; break; case GMAC_CLK_SRC_RGMII: index = CLK_IDX_RGMII; break; default: return (ENXIO); } clknode_init_parent_idx(clk, index); return (0); } static int aw_gmacclk_set_mux(struct clknode *clk, int index) { struct aw_gmacclk_sc *sc; uint32_t val, clk_src, pit; sc = clknode_get_softc(clk); switch (index) { case CLK_IDX_MII: clk_src = 
GMAC_CLK_SRC_MII; pit = GMAC_CLK_PIT_MII; break; case CLK_IDX_RGMII: clk_src = GMAC_CLK_SRC_RGMII; pit = GMAC_CLK_PIT_RGMII; break; default: return (ENXIO); } DEVICE_LOCK(sc); GMACCLK_READ(sc, &val); val &= ~(GMAC_CLK_SRC | GMAC_CLK_PIT); val |= (clk_src << GMAC_CLK_SRC_SHIFT); val |= (pit << GMAC_CLK_PIT_SHIFT); GMACCLK_WRITE(sc, val); DEVICE_UNLOCK(sc); return (0); } static clknode_method_t aw_gmacclk_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_gmacclk_init), CLKNODEMETHOD(clknode_set_mux, aw_gmacclk_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_gmacclk_clknode, aw_gmacclk_clknode_class, aw_gmacclk_clknode_methods, sizeof(struct aw_gmacclk_sc), clknode_class); static int aw_gmacclk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner GMAC Clock"); return (BUS_PROBE_DEFAULT); } static int aw_gmacclk_attach(device_t dev) { struct clknode_init_def def; struct aw_gmacclk_sc *sc; struct clkdom *clkdom; struct clknode *clk; clk_t clk_parent; bus_addr_t paddr; bus_size_t psize; phandle_t node; int error, ncells, i; node = ofw_bus_get_node(dev); if (ofw_reg_to_paddr(node, 0, &paddr, &psize, NULL) != 0) { device_printf(dev, "cannot parse 'reg' property\n"); return (ENXIO); } error = ofw_bus_parse_xref_list_get_length(node, "clocks", "#clock-cells", &ncells); if (error != 0 || ncells != CLK_IDX_COUNT) { device_printf(dev, "couldn't find parent clocks\n"); return (ENXIO); } clkdom = clkdom_create(dev); memset(&def, 0, sizeof(def)); error = clk_parse_ofw_clk_name(dev, node, &def.name); if (error != 0) { device_printf(dev, "cannot parse clock name\n"); error = ENXIO; goto fail; } def.id = 1; def.parent_names = malloc(sizeof(char *) * ncells, M_OFWPROP, M_WAITOK); for (i = 0; i < ncells; i++) { error = clk_get_by_ofw_index(dev, 0, i, &clk_parent); if (error != 0) { device_printf(dev, "cannot get clock %d\n", error); 
goto fail; } def.parent_names[i] = clk_get_name(clk_parent); clk_release(clk_parent); } def.parent_cnt = ncells; clk = clknode_create(clkdom, &aw_gmacclk_clknode_class, &def); if (clk == NULL) { device_printf(dev, "cannot create clknode\n"); error = ENXIO; goto fail; } sc = clknode_get_softc(clk); sc->reg = paddr; sc->clkdev = device_get_parent(dev); sc->tx_delay = sc->rx_delay = -1; OF_getencprop(node, "tx-delay", &sc->tx_delay, sizeof(sc->tx_delay)); OF_getencprop(node, "rx-delay", &sc->rx_delay, sizeof(sc->rx_delay)); clknode_register(clkdom, clk); if (clkdom_finit(clkdom) != 0) { device_printf(dev, "cannot finalize clkdom initialization\n"); error = ENXIO; goto fail; } if (bootverbose) clkdom_dump(clkdom); return (0); fail: return (error); } static device_method_t aw_gmacclk_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_gmacclk_probe), DEVMETHOD(device_attach, aw_gmacclk_attach), DEVMETHOD_END }; static driver_t aw_gmacclk_driver = { "aw_gmacclk", aw_gmacclk_methods, 0 }; EARLY_DRIVER_MODULE(aw_gmacclk, simplebus, aw_gmacclk_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm/allwinner/aw_gpio.c b/sys/arm/allwinner/aw_gpio.c index 5dd9a211acf4..b77972ac0187 100644 --- a/sys/arm/allwinner/aw_gpio.c +++ b/sys/arm/allwinner/aw_gpio.c @@ -1,1483 +1,1483 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Ganbold Tsagaankhuu * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012 Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #if defined(__aarch64__) #include "opt_soc.h" #endif #include "pic_if.h" #include "gpio_if.h" #define AW_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN); #define AW_GPIO_INTR_CAPS (GPIO_INTR_LEVEL_LOW | GPIO_INTR_LEVEL_HIGH | \ GPIO_INTR_EDGE_RISING | GPIO_INTR_EDGE_FALLING | GPIO_INTR_EDGE_BOTH) #define AW_GPIO_NONE 0 #define AW_GPIO_PULLUP 1 #define AW_GPIO_PULLDOWN 2 #define AW_GPIO_INPUT 0 #define AW_GPIO_OUTPUT 1 #define AW_GPIO_DRV_MASK 0x3 #define AW_GPIO_PUD_MASK 0x3 #define AW_PINCTRL 1 #define AW_R_PINCTRL 2 struct aw_gpio_conf { struct allwinner_padconf *padconf; const char *banks; }; /* Defined in aw_padconf.c */ #ifdef SOC_ALLWINNER_A10 extern struct allwinner_padconf a10_padconf; struct aw_gpio_conf a10_gpio_conf = { .padconf = &a10_padconf, .banks 
= "abcdefghi", }; #endif /* Defined in a13_padconf.c */ #ifdef SOC_ALLWINNER_A13 extern struct allwinner_padconf a13_padconf; struct aw_gpio_conf a13_gpio_conf = { .padconf = &a13_padconf, .banks = "bcdefg", }; #endif /* Defined in a20_padconf.c */ #ifdef SOC_ALLWINNER_A20 extern struct allwinner_padconf a20_padconf; struct aw_gpio_conf a20_gpio_conf = { .padconf = &a20_padconf, .banks = "abcdefghi", }; #endif /* Defined in a31_padconf.c */ #ifdef SOC_ALLWINNER_A31 extern struct allwinner_padconf a31_padconf; struct aw_gpio_conf a31_gpio_conf = { .padconf = &a31_padconf, .banks = "abcdefgh", }; #endif /* Defined in a31s_padconf.c */ #ifdef SOC_ALLWINNER_A31S extern struct allwinner_padconf a31s_padconf; struct aw_gpio_conf a31s_gpio_conf = { .padconf = &a31s_padconf, .banks = "abcdefgh", }; #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) extern struct allwinner_padconf a31_r_padconf; struct aw_gpio_conf a31_r_gpio_conf = { .padconf = &a31_r_padconf, .banks = "lm", }; #endif /* Defined in a33_padconf.c */ #ifdef SOC_ALLWINNER_A33 extern struct allwinner_padconf a33_padconf; struct aw_gpio_conf a33_gpio_conf = { .padconf = &a33_padconf, .banks = "bcdefgh", }; #endif /* Defined in h3_padconf.c */ #if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5) extern struct allwinner_padconf h3_padconf; extern struct allwinner_padconf h3_r_padconf; struct aw_gpio_conf h3_gpio_conf = { .padconf = &h3_padconf, .banks = "acdefg", }; struct aw_gpio_conf h3_r_gpio_conf = { .padconf = &h3_r_padconf, .banks = "l", }; #endif /* Defined in a83t_padconf.c */ #ifdef SOC_ALLWINNER_A83T extern struct allwinner_padconf a83t_padconf; extern struct allwinner_padconf a83t_r_padconf; struct aw_gpio_conf a83t_gpio_conf = { .padconf = &a83t_padconf, .banks = "bcdefgh" }; struct aw_gpio_conf a83t_r_gpio_conf = { .padconf = &a83t_r_padconf, .banks = "l", }; #endif /* Defined in a64_padconf.c */ #ifdef SOC_ALLWINNER_A64 extern struct allwinner_padconf a64_padconf; extern 
struct allwinner_padconf a64_r_padconf; struct aw_gpio_conf a64_gpio_conf = { .padconf = &a64_padconf, .banks = "bcdefgh", }; struct aw_gpio_conf a64_r_gpio_conf = { .padconf = &a64_r_padconf, .banks = "l", }; #endif /* Defined in h6_padconf.c */ #ifdef SOC_ALLWINNER_H6 extern struct allwinner_padconf h6_padconf; extern struct allwinner_padconf h6_r_padconf; struct aw_gpio_conf h6_gpio_conf = { .padconf = &h6_padconf, .banks = "cdfgh", }; struct aw_gpio_conf h6_r_gpio_conf = { .padconf = &h6_r_padconf, .banks = "lm", }; #endif static struct ofw_compat_data compat_data[] = { #ifdef SOC_ALLWINNER_A10 {"allwinner,sun4i-a10-pinctrl", (uintptr_t)&a10_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A13 {"allwinner,sun5i-a13-pinctrl", (uintptr_t)&a13_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A20 {"allwinner,sun7i-a20-pinctrl", (uintptr_t)&a20_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A31 {"allwinner,sun6i-a31-pinctrl", (uintptr_t)&a31_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A31S {"allwinner,sun6i-a31s-pinctrl", (uintptr_t)&a31s_gpio_conf}, #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) {"allwinner,sun6i-a31-r-pinctrl", (uintptr_t)&a31_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A33 {"allwinner,sun6i-a33-pinctrl", (uintptr_t)&a33_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A83T {"allwinner,sun8i-a83t-pinctrl", (uintptr_t)&a83t_gpio_conf}, {"allwinner,sun8i-a83t-r-pinctrl", (uintptr_t)&a83t_r_gpio_conf}, #endif #if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5) {"allwinner,sun8i-h3-pinctrl", (uintptr_t)&h3_gpio_conf}, {"allwinner,sun50i-h5-pinctrl", (uintptr_t)&h3_gpio_conf}, {"allwinner,sun8i-h3-r-pinctrl", (uintptr_t)&h3_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A64 {"allwinner,sun50i-a64-pinctrl", (uintptr_t)&a64_gpio_conf}, {"allwinner,sun50i-a64-r-pinctrl", (uintptr_t)&a64_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_H6 {"allwinner,sun50i-h6-pinctrl", (uintptr_t)&h6_gpio_conf}, {"allwinner,sun50i-h6-r-pinctrl", (uintptr_t)&h6_r_gpio_conf}, #endif {NULL, 0} }; 
struct clk_list { TAILQ_ENTRY(clk_list) next; clk_t clk; }; struct gpio_irqsrc { struct intr_irqsrc isrc; u_int irq; uint32_t mode; uint32_t pin; uint32_t bank; uint32_t intnum; uint32_t intfunc; uint32_t oldfunc; bool enabled; }; #define AW_GPIO_MEMRES 0 #define AW_GPIO_IRQRES 1 #define AW_GPIO_RESSZ 2 struct aw_gpio_softc { device_t sc_dev; device_t sc_busdev; struct resource * sc_res[AW_GPIO_RESSZ]; struct mtx sc_mtx; struct resource * sc_mem_res; struct resource * sc_irq_res; void * sc_intrhand; struct aw_gpio_conf *conf; TAILQ_HEAD(, clk_list) clk_list; struct gpio_irqsrc *gpio_pic_irqsrc; int nirqs; }; static struct resource_spec aw_gpio_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; #define AW_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx) #define AW_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx) #define AW_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define AW_GPIO_GP_CFG(_bank, _idx) 0x00 + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_DAT(_bank) 0x10 + ((_bank) * 0x24) #define AW_GPIO_GP_DRV(_bank, _idx) 0x14 + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_PUL(_bank, _idx) 0x1c + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_INT_BASE(_bank) (0x200 + 0x20 * _bank) #define AW_GPIO_GP_INT_CFG(_bank, _pin) (AW_GPIO_GP_INT_BASE(_bank) + (0x4 * ((_pin) / 8))) #define AW_GPIO_GP_INT_CTL(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x10) #define AW_GPIO_GP_INT_STA(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x14) #define AW_GPIO_GP_INT_DEB(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x18) #define AW_GPIO_INT_EDGE_POSITIVE 0x0 #define AW_GPIO_INT_EDGE_NEGATIVE 0x1 #define AW_GPIO_INT_LEVEL_HIGH 0x2 #define AW_GPIO_INT_LEVEL_LOW 0x3 #define AW_GPIO_INT_EDGE_BOTH 0x4 static char *aw_gpio_parse_function(phandle_t node); static const char **aw_gpio_parse_pins(phandle_t node, int *pins_nb); static uint32_t aw_gpio_parse_bias(phandle_t node); static int aw_gpio_parse_drive_strength(phandle_t 
node, uint32_t *drive); static int aw_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *value); static int aw_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value); static int aw_gpio_pin_get_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int *value); static int aw_gpio_pin_set_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int value); static void aw_gpio_intr(void *arg); static void aw_gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc); static void aw_gpio_pic_disable_intr_locked(struct aw_gpio_softc *sc, struct intr_irqsrc *isrc); static void aw_gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc); static int aw_gpio_register_isrcs(struct aw_gpio_softc *sc); #define AW_GPIO_WRITE(_sc, _off, _val) \ bus_write_4((_sc)->sc_res[AW_GPIO_MEMRES], _off, _val) #define AW_GPIO_READ(_sc, _off) \ bus_read_4((_sc)->sc_res[AW_GPIO_MEMRES], _off) static uint32_t aw_gpio_get_function(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, func, offset; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (0); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); func = AW_GPIO_READ(sc, AW_GPIO_GP_CFG(bank, pin >> 3)); return ((func >> offset) & 0x7); } static int aw_gpio_set_function(struct aw_gpio_softc *sc, uint32_t pin, uint32_t f) { uint32_t bank, data, offset; /* Check if the function exists in the padconf data */ if (sc->conf->padconf->pins[pin].functions[f] == NULL) return (EINVAL); /* Must be called with lock held. 
*/ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); data = AW_GPIO_READ(sc, AW_GPIO_GP_CFG(bank, pin >> 3)); data &= ~(7 << offset); data |= (f << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_CFG(bank, pin >> 3), data); return (0); } static uint32_t aw_gpio_get_pud(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_PUL(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_PUD_MASK); } static void aw_gpio_set_pud(struct aw_gpio_softc *sc, uint32_t pin, uint32_t state) { uint32_t bank, offset, val; if (aw_gpio_get_pud(sc, pin) == state) return; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_PUL(bank, pin >> 4)); val &= ~(AW_GPIO_PUD_MASK << offset); val |= (state << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_PUL(bank, pin >> 4), val); } static uint32_t aw_gpio_get_drv(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_DRV(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_DRV_MASK); } static void aw_gpio_set_drv(struct aw_gpio_softc *sc, uint32_t pin, uint32_t drive) { uint32_t bank, offset, val; if (aw_gpio_get_drv(sc, pin) == drive) return; /* Must be called with lock held. 
*/ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_DRV(bank, pin >> 4)); val &= ~(AW_GPIO_DRV_MASK << offset); val |= (drive << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_DRV(bank, pin >> 4), val); } static int aw_gpio_pin_configure(struct aw_gpio_softc *sc, uint32_t pin, uint32_t flags) { u_int val; int err = 0; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); /* Manage input/output. */ if (flags & GPIO_PIN_INPUT) { err = aw_gpio_set_function(sc, pin, AW_GPIO_INPUT); } else if ((flags & GPIO_PIN_OUTPUT) && aw_gpio_get_function(sc, pin) != AW_GPIO_OUTPUT) { if (flags & GPIO_PIN_PRESET_LOW) { aw_gpio_pin_set_locked(sc, pin, 0); } else if (flags & GPIO_PIN_PRESET_HIGH) { aw_gpio_pin_set_locked(sc, pin, 1); } else { /* Read the pin and preset output to current state. */ err = aw_gpio_set_function(sc, pin, AW_GPIO_INPUT); if (err == 0) { aw_gpio_pin_get_locked(sc, pin, &val); aw_gpio_pin_set_locked(sc, pin, val); } } if (err == 0) err = aw_gpio_set_function(sc, pin, AW_GPIO_OUTPUT); } if (err) return (err); /* Manage Pull-up/pull-down. 
*/ if (flags & GPIO_PIN_PULLUP) aw_gpio_set_pud(sc, pin, AW_GPIO_PULLUP); else if (flags & GPIO_PIN_PULLDOWN) aw_gpio_set_pud(sc, pin, AW_GPIO_PULLDOWN); else aw_gpio_set_pud(sc, pin, AW_GPIO_NONE); return (0); } static device_t aw_gpio_get_bus(device_t dev) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int aw_gpio_pin_max(device_t dev, int *maxpin) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); *maxpin = sc->conf->padconf->npins - 1; return (0); } static int aw_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); *caps = AW_GPIO_DEFAULT_CAPS; if (sc->conf->padconf->pins[pin].eint_func != 0) *caps |= AW_GPIO_INTR_CAPS; return (0); } static int aw_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct aw_gpio_softc *sc; uint32_t func; uint32_t pud; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); AW_GPIO_LOCK(sc); func = aw_gpio_get_function(sc, pin); switch (func) { case AW_GPIO_INPUT: *flags = GPIO_PIN_INPUT; break; case AW_GPIO_OUTPUT: *flags = GPIO_PIN_OUTPUT; break; default: *flags = 0; break; } pud = aw_gpio_get_pud(sc, pin); switch (pud) { case AW_GPIO_PULLDOWN: *flags |= GPIO_PIN_PULLDOWN; break; case AW_GPIO_PULLUP: *flags |= GPIO_PIN_PULLUP; break; default: break; } AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); snprintf(name, GPIOMAXNAME - 1, "%s", sc->conf->padconf->pins[pin].name); name[GPIOMAXNAME - 1] = '\0'; return (0); } static int aw_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct aw_gpio_softc *sc; int err; sc = device_get_softc(dev); if (pin > sc->conf->padconf->npins) return (EINVAL); AW_GPIO_LOCK(sc); err = aw_gpio_pin_configure(sc, pin, 
flags); AW_GPIO_UNLOCK(sc); return (err); } static int aw_gpio_pin_set_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int value) { uint32_t bank, data; AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if (value) data |= (1 << pin); else data &= ~(1 << pin); AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), data); return (0); } static int aw_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct aw_gpio_softc *sc; int ret; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); ret = aw_gpio_pin_set_locked(sc, pin, value); AW_GPIO_UNLOCK(sc); return (ret); } static int aw_gpio_pin_get_locked(struct aw_gpio_softc *sc,uint32_t pin, unsigned int *val) { uint32_t bank, reg_data; AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; reg_data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); *val = (reg_data & (1 << pin)) ? 
1 : 0; return (0); } static char * aw_gpio_parse_function(phandle_t node) { char *function; if (OF_getprop_alloc(node, "function", (void **)&function) != -1) return (function); if (OF_getprop_alloc(node, "allwinner,function", (void **)&function) != -1) return (function); return (NULL); } static const char ** aw_gpio_parse_pins(phandle_t node, int *pins_nb) { const char **pinlist; *pins_nb = ofw_bus_string_list_to_array(node, "pins", &pinlist); if (*pins_nb > 0) return (pinlist); *pins_nb = ofw_bus_string_list_to_array(node, "allwinner,pins", &pinlist); if (*pins_nb > 0) return (pinlist); return (NULL); } static uint32_t aw_gpio_parse_bias(phandle_t node) { uint32_t bias; if (OF_getencprop(node, "pull", &bias, sizeof(bias)) != -1) return (bias); if (OF_getencprop(node, "allwinner,pull", &bias, sizeof(bias)) != -1) return (bias); if (OF_hasprop(node, "bias-disable")) return (AW_GPIO_NONE); if (OF_hasprop(node, "bias-pull-up")) return (AW_GPIO_PULLUP); if (OF_hasprop(node, "bias-pull-down")) return (AW_GPIO_PULLDOWN); return (AW_GPIO_NONE); } static int aw_gpio_parse_drive_strength(phandle_t node, uint32_t *drive) { uint32_t drive_str; if (OF_getencprop(node, "drive", drive, sizeof(*drive)) != -1) return (0); if (OF_getencprop(node, "allwinner,drive", drive, sizeof(*drive)) != -1) return (0); if (OF_getencprop(node, "drive-strength", &drive_str, sizeof(drive_str)) != -1) { *drive = (drive_str / 10) - 1; return (0); } return (1); } static int aw_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct aw_gpio_softc *sc; int ret; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); ret = aw_gpio_pin_get_locked(sc, pin, val); AW_GPIO_UNLOCK(sc); return (ret); } static int aw_gpio_pin_toggle(device_t dev, uint32_t pin) { struct aw_gpio_softc *sc; uint32_t bank, data; sc = device_get_softc(dev); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; AW_GPIO_LOCK(sc); data = 
AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if (data & (1 << pin)) data &= ~(1 << pin); else data |= (1 << pin); AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), data); AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins) { struct aw_gpio_softc *sc; uint32_t bank, data, pin; sc = device_get_softc(dev); if (first_pin > sc->conf->padconf->npins) return (EINVAL); /* * We require that first_pin refers to the first pin in a bank, because * this API is not about convenience, it's for making a set of pins * change simultaneously (required) with reasonably high performance * (desired); we need to do a read-modify-write on a single register. */ bank = sc->conf->padconf->pins[first_pin].port; pin = sc->conf->padconf->pins[first_pin].pin; if (pin != 0) return (EINVAL); AW_GPIO_LOCK(sc); data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if ((clear_pins | change_pins) != 0) AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), (data & ~clear_pins) ^ change_pins); AW_GPIO_UNLOCK(sc); if (orig_pins != NULL) *orig_pins = data; return (0); } static int aw_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins, uint32_t *pin_flags) { struct aw_gpio_softc *sc; uint32_t pin; int err; sc = device_get_softc(dev); if (first_pin > sc->conf->padconf->npins) return (EINVAL); if (sc->conf->padconf->pins[first_pin].pin != 0) return (EINVAL); /* * The configuration for a bank of pins is scattered among several * registers; we cannot g'tee to simultaneously change the state of all * the pins in the flags array. So just loop through the array * configuring each pin for now. If there was a strong need, it might * be possible to support some limited simultaneous config, such as * adjacent groups of 8 pins that line up the same as the config regs. 
*/ for (err = 0, pin = first_pin; err == 0 && pin < num_pins; ++pin) { if (pin_flags[pin] & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) err = aw_gpio_pin_configure(sc, pin, pin_flags[pin]); } return (err); } static int aw_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { struct aw_gpio_softc *sc; int i; sc = device_get_softc(bus); /* The GPIO pins are mapped as: . */ for (i = 0; i < sc->conf->padconf->npins; i++) if (sc->conf->padconf->pins[i].port == gpios[0] && sc->conf->padconf->pins[i].pin == gpios[1]) { *pin = i; break; } *flags = gpios[gcells - 1]; return (0); } static int aw_find_pinnum_by_name(struct aw_gpio_softc *sc, const char *pinname) { int i; for (i = 0; i < sc->conf->padconf->npins; i++) if (!strcmp(pinname, sc->conf->padconf->pins[i].name)) return i; return (-1); } static int aw_find_pin_func(struct aw_gpio_softc *sc, int pin, const char *func) { int i; for (i = 0; i < AW_MAX_FUNC_BY_PIN; i++) if (sc->conf->padconf->pins[pin].functions[i] && !strcmp(func, sc->conf->padconf->pins[pin].functions[i])) return (i); return (-1); } static int aw_fdt_configure_pins(device_t dev, phandle_t cfgxref) { struct aw_gpio_softc *sc; phandle_t node; const char **pinlist = NULL; char *pin_function = NULL; uint32_t pin_drive, pin_pull; int pins_nb, pin_num, pin_func, i, ret; bool set_drive; sc = device_get_softc(dev); node = OF_node_from_xref(cfgxref); ret = 0; set_drive = false; /* Getting all prop for configuring pins */ pinlist = aw_gpio_parse_pins(node, &pins_nb); if (pinlist == NULL) return (ENOENT); pin_function = aw_gpio_parse_function(node); if (pin_function == NULL) { ret = ENOENT; goto out; } if (aw_gpio_parse_drive_strength(node, &pin_drive) == 0) set_drive = true; pin_pull = aw_gpio_parse_bias(node); /* Configure each pin to the correct function, drive and pull */ for (i = 0; i < pins_nb; i++) { pin_num = aw_find_pinnum_by_name(sc, pinlist[i]); if (pin_num == -1) { ret = ENOENT; goto out; } 
pin_func = aw_find_pin_func(sc, pin_num, pin_function); if (pin_func == -1) { ret = ENOENT; goto out; } AW_GPIO_LOCK(sc); if (aw_gpio_get_function(sc, pin_num) != pin_func) aw_gpio_set_function(sc, pin_num, pin_func); if (set_drive) aw_gpio_set_drv(sc, pin_num, pin_drive); if (pin_pull != AW_GPIO_NONE) aw_gpio_set_pud(sc, pin_num, pin_pull); AW_GPIO_UNLOCK(sc); } out: OF_prop_free(pinlist); OF_prop_free(pin_function); return (ret); } static void aw_gpio_enable_bank_supply(void *arg) { struct aw_gpio_softc *sc = arg; regulator_t vcc_supply; char bank_reg_name[16]; int i, nbanks; nbanks = strlen(sc->conf->banks); for (i = 0; i < nbanks; i++) { snprintf(bank_reg_name, sizeof(bank_reg_name), "vcc-p%c-supply", sc->conf->banks[i]); if (regulator_get_by_ofw_property(sc->sc_dev, 0, bank_reg_name, &vcc_supply) == 0) { if (bootverbose) device_printf(sc->sc_dev, "Enabling regulator for gpio bank %c\n", sc->conf->banks[i]); if (regulator_enable(vcc_supply) != 0) { device_printf(sc->sc_dev, "Cannot enable regulator for bank %c\n", sc->conf->banks[i]); } } } } static int aw_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner GPIO/Pinmux controller"); return (BUS_PROBE_DEFAULT); } static int aw_gpio_attach(device_t dev) { int error; phandle_t gpio; struct aw_gpio_softc *sc; struct clk_list *clkp, *clkp_tmp; clk_t clk; hwreset_t rst = NULL; int off, err, clkret; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "aw gpio", "gpio", MTX_SPIN); if (bus_alloc_resources(dev, aw_gpio_res_spec, sc->sc_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_res[AW_GPIO_IRQRES], INTR_TYPE_CLK | INTR_MPSAFE, NULL, aw_gpio_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); goto fail; } /* Find our node. 
*/ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Use the right pin data for the current SoC */ sc->conf = (struct aw_gpio_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } TAILQ_INIT(&sc->clk_list); for (off = 0, clkret = 0; clkret == 0; off++) { clkret = clk_get_by_ofw_index(dev, 0, off, &clk); if (clkret != 0) break; err = clk_enable(clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(clk)); goto fail; } clkp = malloc(sizeof(*clkp), M_DEVBUF, M_WAITOK | M_ZERO); clkp->clk = clk; TAILQ_INSERT_TAIL(&sc->clk_list, clkp, next); } if (clkret != 0 && clkret != ENOENT) { device_printf(dev, "Could not find clock at offset %d (%d)\n", off, clkret); goto fail; } aw_gpio_register_isrcs(sc); intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev))); sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; /* * Register as a pinctrl device */ fdt_pinctrl_register(dev, "pins"); fdt_pinctrl_configure_tree(dev); fdt_pinctrl_register(dev, "allwinner,pins"); fdt_pinctrl_configure_tree(dev); config_intrhook_oneshot(aw_gpio_enable_bank_supply, sc); return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); /* Disable clock */ TAILQ_FOREACH_SAFE(clkp, &sc->clk_list, next, clkp_tmp) { err = clk_disable(clkp->clk); if (err != 0) device_printf(dev, "Could not disable clock %s\n", clk_get_name(clkp->clk)); err = clk_release(clkp->clk); if (err != 0) device_printf(dev, "Could not release clock %s\n", clk_get_name(clkp->clk)); TAILQ_REMOVE(&sc->clk_list, clkp, next); free(clkp, M_DEVBUF); } /* Assert resets */ if (rst) { 
hwreset_assert(rst); hwreset_release(rst); } return (ENXIO); } static int aw_gpio_detach(device_t dev) { return (EBUSY); } static void aw_gpio_intr(void *arg) { struct aw_gpio_softc *sc; struct intr_irqsrc *isrc; uint32_t reg; int irq; sc = (struct aw_gpio_softc *)arg; AW_GPIO_LOCK(sc); for (irq = 0; irq < sc->nirqs; irq++) { if (!sc->gpio_pic_irqsrc[irq].enabled) continue; reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_STA(sc->gpio_pic_irqsrc[irq].bank)); if (!(reg & (1 << sc->gpio_pic_irqsrc[irq].intnum))) continue; isrc = &sc->gpio_pic_irqsrc[irq].isrc; if (intr_isrc_dispatch(isrc, curthread->td_intr_frame) != 0) { aw_gpio_pic_disable_intr_locked(sc, isrc); aw_gpio_pic_post_filter(sc->sc_dev, isrc); device_printf(sc->sc_dev, "Stray irq %u disabled\n", irq); } } AW_GPIO_UNLOCK(sc); } /* * Interrupts support */ static int aw_gpio_register_isrcs(struct aw_gpio_softc *sc) { const char *name; int nirqs; int pin; int err; name = device_get_nameunit(sc->sc_dev); for (nirqs = 0, pin = 0; pin < sc->conf->padconf->npins; pin++) { if (sc->conf->padconf->pins[pin].eint_func == 0) continue; nirqs++; } sc->gpio_pic_irqsrc = malloc(sizeof(*sc->gpio_pic_irqsrc) * nirqs, M_DEVBUF, M_WAITOK | M_ZERO); for (nirqs = 0, pin = 0; pin < sc->conf->padconf->npins; pin++) { if (sc->conf->padconf->pins[pin].eint_func == 0) continue; sc->gpio_pic_irqsrc[nirqs].pin = pin; sc->gpio_pic_irqsrc[nirqs].bank = sc->conf->padconf->pins[pin].eint_bank; sc->gpio_pic_irqsrc[nirqs].intnum = sc->conf->padconf->pins[pin].eint_num; sc->gpio_pic_irqsrc[nirqs].intfunc = sc->conf->padconf->pins[pin].eint_func; sc->gpio_pic_irqsrc[nirqs].irq = nirqs; sc->gpio_pic_irqsrc[nirqs].mode = GPIO_INTR_CONFORM; err = intr_isrc_register(&sc->gpio_pic_irqsrc[nirqs].isrc, sc->sc_dev, 0, "%s,%s", name, sc->conf->padconf->pins[pin].functions[sc->conf->padconf->pins[pin].eint_func]); if (err) { device_printf(sc->sc_dev, "intr_isrs_register failed for irq %d\n", nirqs); } nirqs++; } sc->nirqs = nirqs; return (0); } static void 
aw_gpio_pic_disable_intr_locked(struct aw_gpio_softc *sc, struct intr_irqsrc *isrc) { u_int irq; uint32_t reg; AW_GPIO_LOCK_ASSERT(sc); irq = ((struct gpio_irqsrc *)isrc)->irq; reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank)); reg &= ~(1 << sc->gpio_pic_irqsrc[irq].intnum); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank), reg); sc->gpio_pic_irqsrc[irq].enabled = false; } static void aw_gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); aw_gpio_pic_disable_intr_locked(sc, isrc); AW_GPIO_UNLOCK(sc); } static void aw_gpio_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; u_int irq; uint32_t reg; sc = device_get_softc(dev); irq = ((struct gpio_irqsrc *)isrc)->irq; AW_GPIO_LOCK(sc); reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank)); reg |= 1 << sc->gpio_pic_irqsrc[irq].intnum; AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank), reg); AW_GPIO_UNLOCK(sc); sc->gpio_pic_irqsrc[irq].enabled = true; } static int aw_gpio_pic_map_gpio(struct aw_gpio_softc *sc, struct intr_map_data_gpio *dag, u_int *irqp, u_int *mode) { u_int irq; int pin; irq = dag->gpio_pin_num; for (pin = 0; pin < sc->nirqs; pin++) if (sc->gpio_pic_irqsrc[pin].pin == irq) break; if (pin == sc->nirqs) { device_printf(sc->sc_dev, "Invalid interrupt number %u\n", irq); return (EINVAL); } switch (dag->gpio_intr_mode) { case GPIO_INTR_LEVEL_LOW: case GPIO_INTR_LEVEL_HIGH: case GPIO_INTR_EDGE_RISING: case GPIO_INTR_EDGE_FALLING: case GPIO_INTR_EDGE_BOTH: break; default: device_printf(sc->sc_dev, "Unsupported interrupt mode 0x%8x\n", dag->gpio_intr_mode); return (EINVAL); } *irqp = pin; if (mode != NULL) *mode = dag->gpio_intr_mode; return (0); } static int aw_gpio_pic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct aw_gpio_softc *sc; u_int irq; int err; sc = 
device_get_softc(dev); switch (data->type) { case INTR_MAP_DATA_GPIO: err = aw_gpio_pic_map_gpio(sc, (struct intr_map_data_gpio *)data, &irq, NULL); break; default: return (ENOTSUP); }; if (err == 0) *isrcp = &sc->gpio_pic_irqsrc[irq].isrc; return (0); } static int aw_gpio_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct aw_gpio_softc *sc; uint32_t irqcfg; uint32_t pinidx, reg; u_int irq, mode; int err; sc = device_get_softc(dev); err = 0; switch (data->type) { case INTR_MAP_DATA_GPIO: err = aw_gpio_pic_map_gpio(sc, (struct intr_map_data_gpio *)data, &irq, &mode); if (err != 0) return (err); break; default: return (ENOTSUP); }; pinidx = (sc->gpio_pic_irqsrc[irq].intnum % 8) * 4; AW_GPIO_LOCK(sc); switch (mode) { case GPIO_INTR_LEVEL_LOW: irqcfg = AW_GPIO_INT_LEVEL_LOW << pinidx; break; case GPIO_INTR_LEVEL_HIGH: irqcfg = AW_GPIO_INT_LEVEL_HIGH << pinidx; break; case GPIO_INTR_EDGE_RISING: irqcfg = AW_GPIO_INT_EDGE_POSITIVE << pinidx; break; case GPIO_INTR_EDGE_FALLING: irqcfg = AW_GPIO_INT_EDGE_NEGATIVE << pinidx; break; case GPIO_INTR_EDGE_BOTH: irqcfg = AW_GPIO_INT_EDGE_BOTH << pinidx; break; } /* Switch the pin to interrupt mode */ sc->gpio_pic_irqsrc[irq].oldfunc = aw_gpio_get_function(sc, sc->gpio_pic_irqsrc[irq].pin); aw_gpio_set_function(sc, sc->gpio_pic_irqsrc[irq].pin, sc->gpio_pic_irqsrc[irq].intfunc); /* Write interrupt mode */ reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CFG(sc->gpio_pic_irqsrc[irq].bank, sc->gpio_pic_irqsrc[irq].intnum)); reg &= ~(0xF << pinidx); reg |= irqcfg; AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CFG(sc->gpio_pic_irqsrc[irq].bank, sc->gpio_pic_irqsrc[irq].intnum), reg); AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; /* Switch back the pin to it's original 
function */ AW_GPIO_LOCK(sc); aw_gpio_set_function(sc, gi->pin, gi->oldfunc); AW_GPIO_UNLOCK(sc); return (0); } static void aw_gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; arm_irq_memory_barrier(0); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_STA(gi->bank), 1 << gi->intnum); } static void aw_gpio_pic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; arm_irq_memory_barrier(0); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_STA(gi->bank), 1 << gi->intnum); aw_gpio_pic_enable_intr(dev, isrc); } static void aw_gpio_pic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); aw_gpio_pic_disable_intr_locked(sc, isrc); } /* * OFWBUS Interface */ static phandle_t aw_gpio_get_node(device_t dev, device_t bus) { /* We only have one child, the GPIO bus, which needs our own node. 
*/ return (ofw_bus_get_node(dev)); } static device_method_t aw_gpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_gpio_probe), DEVMETHOD(device_attach, aw_gpio_attach), DEVMETHOD(device_detach, aw_gpio_detach), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, aw_gpio_pic_disable_intr), DEVMETHOD(pic_enable_intr, aw_gpio_pic_enable_intr), DEVMETHOD(pic_map_intr, aw_gpio_pic_map_intr), DEVMETHOD(pic_setup_intr, aw_gpio_pic_setup_intr), DEVMETHOD(pic_teardown_intr, aw_gpio_pic_teardown_intr), DEVMETHOD(pic_post_filter, aw_gpio_pic_post_filter), DEVMETHOD(pic_post_ithread, aw_gpio_pic_post_ithread), DEVMETHOD(pic_pre_ithread, aw_gpio_pic_pre_ithread), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, aw_gpio_get_bus), DEVMETHOD(gpio_pin_max, aw_gpio_pin_max), DEVMETHOD(gpio_pin_getname, aw_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, aw_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, aw_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, aw_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, aw_gpio_pin_get), DEVMETHOD(gpio_pin_set, aw_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, aw_gpio_pin_toggle), DEVMETHOD(gpio_pin_access_32, aw_gpio_pin_access_32), DEVMETHOD(gpio_pin_config_32, aw_gpio_pin_config_32), DEVMETHOD(gpio_map_gpios, aw_gpio_map_gpios), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, aw_gpio_get_node), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure,aw_fdt_configure_pins), DEVMETHOD_END }; static driver_t aw_gpio_driver = { "gpio", aw_gpio_methods, sizeof(struct aw_gpio_softc), }; EARLY_DRIVER_MODULE(aw_gpio, simplebus, aw_gpio_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); diff --git a/sys/arm/allwinner/aw_i2s.c b/sys/arm/allwinner/aw_i2s.c index 8c159de7da6d..376405056f51 100644 --- a/sys/arm/allwinner/aw_i2s.c +++ b/sys/arm/allwinner/aw_i2s.c @@ -1,805 +1,805 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * Copyright (c) 2018 Jared McNeill * * Redistribution and use 
in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "syscon_if.h" #include "opt_snd.h" #include #include #include "audio_dai_if.h" #define FIFO_LEVEL 0x40 #define DA_CTL 0x00 #define DA_CTL_BCLK_OUT (1 << 18) /* sun8i */ #define DA_CLK_LRCK_OUT (1 << 17) /* sun8i */ #define DA_CTL_SDO_EN (1 << 8) #define DA_CTL_MS (1 << 5) /* sun4i */ #define DA_CTL_PCM (1 << 4) /* sun4i */ #define DA_CTL_MODE_SEL_MASK (3 << 4) /* sun8i */ #define DA_CTL_MODE_SEL_PCM (0 << 4) /* sun8i */ #define DA_CTL_MODE_SEL_LJ (1 << 4) /* sun8i */ #define DA_CTL_MODE_SEL_RJ (2 << 4) /* sun8i */ #define DA_CTL_TXEN (1 << 2) #define DA_CTL_RXEN (1 << 1) #define DA_CTL_GEN (1 << 0) #define DA_FAT0 0x04 #define DA_FAT0_LRCK_PERIOD_MASK (0x3ff << 8) /* sun8i */ #define DA_FAT0_LRCK_PERIOD(n) (((n) & 0x3fff) << 8) /* sun8i */ #define DA_FAT0_LRCP_MASK (1 << 7) #define DA_LRCP_NORMAL (0 << 7) #define DA_LRCP_INVERTED (1 << 7) #define DA_FAT0_BCP_MASK (1 << 6) #define DA_BCP_NORMAL (0 << 6) #define DA_BCP_INVERTED (1 << 6) #define DA_FAT0_SR __BITS(5,4) #define DA_FAT0_WSS __BITS(3,2) #define DA_FAT0_FMT_MASK (3 << 0) #define DA_FMT_I2S 0 #define DA_FMT_LJ 1 #define DA_FMT_RJ 2 #define DA_FAT1 0x08 #define DA_ISTA 0x0c #define DA_ISTA_TXUI_INT (1 << 6) #define DA_ISTA_TXEI_INT (1 << 4) #define DA_ISTA_RXAI_INT (1 << 0) #define DA_RXFIFO 0x10 #define DA_FCTL 0x14 #define DA_FCTL_HUB_EN (1 << 31) #define DA_FCTL_FTX (1 << 25) #define DA_FCTL_FRX (1 << 24) #define DA_FCTL_TXTL_MASK (0x7f << 12) #define DA_FCTL_TXTL(v) (((v) & 0x7f) << 12) #define DA_FCTL_TXIM (1 << 2) #define DA_FSTA 0x18 #define DA_FSTA_TXE_CNT(v) (((v) >> 16) & 0xff) #define DA_FSTA_RXA_CNT(v) ((v) & 0x3f) #define DA_INT 0x1c #define DA_INT_TX_DRQ (1 << 7) #define DA_INT_TXUI_EN (1 << 6) #define DA_INT_TXEI_EN (1 << 4) #define DA_INT_RX_DRQ (1 << 3) #define DA_INT_RXAI_EN (1 << 0) #define DA_TXFIFO 0x20 #define DA_CLKD 0x24 #define 
DA_CLKD_MCLKO_EN_SUN8I (1 << 8) #define DA_CLKD_MCLKO_EN_SUN4I (1 << 7) #define DA_CLKD_BCLKDIV_SUN8I(n) (((n) & 0xf) << 4) #define DA_CLKD_BCLKDIV_SUN8I_MASK (0xf << 4) #define DA_CLKD_BCLKDIV_SUN4I(n) (((n) & 7) << 4) #define DA_CLKD_BCLKDIV_SUN4I_MASK (7 << 4) #define DA_CLKD_BCLKDIV_8 3 #define DA_CLKD_BCLKDIV_16 5 #define DA_CLKD_MCLKDIV(n) (((n) & 0xff) << 0) #define DA_CLKD_MCLKDIV_MASK (0xf << 0) #define DA_CLKD_MCLKDIV_1 0 #define DA_TXCNT 0x28 #define DA_RXCNT 0x2c #define DA_CHCFG 0x30 /* sun8i */ #define DA_CHCFG_TX_SLOT_HIZ (1 << 9) #define DA_CHCFG_TXN_STATE (1 << 8) #define DA_CHCFG_RX_SLOT_NUM_MASK (7 << 4) #define DA_CHCFG_RX_SLOT_NUM(n) (((n) & 7) << 4) #define DA_CHCFG_TX_SLOT_NUM_MASK (7 << 0) #define DA_CHCFG_TX_SLOT_NUM(n) (((n) & 7) << 0) #define DA_CHSEL_OFFSET(n) (((n) & 3) << 12) /* sun8i */ #define DA_CHSEL_OFFSET_MASK (3 << 12) /* sun8i */ #define DA_CHSEL_EN(n) (((n) & 0xff) << 4) #define DA_CHSEL_EN_MASK (0xff << 4) #define DA_CHSEL_SEL(n) (((n) & 7) << 0) #define DA_CHSEL_SEL_MASK (7 << 0) #define AUDIO_BUFFER_SIZE 48000 * 4 #define AW_I2S_SAMPLE_RATE 48000 #define AW_I2S_CLK_RATE 24576000 enum sunxi_i2s_type { SUNXI_I2S_SUN4I, SUNXI_I2S_SUN8I, }; struct sunxi_i2s_config { const char *name; enum sunxi_i2s_type type; bus_size_t txchsel; bus_size_t txchmap; bus_size_t rxchsel; bus_size_t rxchmap; }; static const struct sunxi_i2s_config sun50i_a64_codec_config = { .name = "Audio Codec (digital part)", .type = SUNXI_I2S_SUN4I, .txchsel = 0x30, .txchmap = 0x34, .rxchsel = 0x38, .rxchmap = 0x3c, }; static const struct sunxi_i2s_config sun8i_h3_config = { .name = "I2S/PCM controller", .type = SUNXI_I2S_SUN8I, .txchsel = 0x34, .txchmap = 0x44, .rxchsel = 0x54, .rxchmap = 0x58, }; static const u_int sun4i_i2s_bclk_divmap[] = { [0] = 2, [1] = 4, [2] = 6, [3] = 8, [4] = 12, [5] = 16, }; static const u_int sun4i_i2s_mclk_divmap[] = { [0] = 1, [1] = 2, [2] = 4, [3] = 6, [4] = 8, [5] = 12, [6] = 16, [7] = 24, }; static const u_int 
sun8i_i2s_divmap[] = { [1] = 1, [2] = 2, [3] = 4, [4] = 6, [5] = 8, [6] = 12, [7] = 16, [8] = 24, [9] = 32, [10] = 48, [11] = 64, [12] = 96, [13] = 128, [14] = 176, [15] = 192, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun50i-a64-codec-i2s", (uintptr_t)&sun50i_a64_codec_config }, { "allwinner,sun8i-h3-i2s", (uintptr_t)&sun8i_h3_config }, { NULL, 0 } }; static struct resource_spec aw_i2s_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct aw_i2s_softc { device_t dev; struct resource *res[2]; struct mtx mtx; clk_t clk; struct sunxi_i2s_config *cfg; void * intrhand; /* pointers to playback/capture buffers */ uint32_t play_ptr; uint32_t rec_ptr; }; #define I2S_LOCK(sc) mtx_lock(&(sc)->mtx) #define I2S_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define I2S_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define I2S_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) #define I2S_TYPE(sc) ((sc)->cfg->type) static int aw_i2s_probe(device_t dev); static int aw_i2s_attach(device_t dev); static int aw_i2s_detach(device_t dev); static u_int sunxi_i2s_div_to_regval(const u_int *divmap, u_int divmaplen, u_int div) { u_int n; for (n = 0; n < divmaplen; n++) if (divmap[n] == div) return n; return -1; } static uint32_t sc_fmt[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps aw_i2s_caps = {AW_I2S_SAMPLE_RATE, AW_I2S_SAMPLE_RATE, sc_fmt, 0}; static int aw_i2s_init(struct aw_i2s_softc *sc) { uint32_t val; int error; error = clk_enable(sc->clk); if (error != 0) { device_printf(sc->dev, "cannot enable mod clock\n"); return (ENXIO); } /* Reset */ val = I2S_READ(sc, DA_CTL); val &= ~(DA_CTL_TXEN|DA_CTL_RXEN|DA_CTL_GEN); I2S_WRITE(sc, DA_CTL, val); val = I2S_READ(sc, DA_FCTL); val &= ~(DA_FCTL_FTX|DA_FCTL_FRX); val &= ~(DA_FCTL_TXTL_MASK); val |= DA_FCTL_TXTL(FIFO_LEVEL); I2S_WRITE(sc, DA_FCTL, val); I2S_WRITE(sc, DA_TXCNT, 0); I2S_WRITE(sc, DA_RXCNT, 0); /* Enable */ val = I2S_READ(sc, 
DA_CTL); val |= DA_CTL_GEN; I2S_WRITE(sc, DA_CTL, val); val |= DA_CTL_SDO_EN; I2S_WRITE(sc, DA_CTL, val); /* Setup channels */ I2S_WRITE(sc, sc->cfg->txchmap, 0x76543210); val = I2S_READ(sc, sc->cfg->txchsel); val &= ~DA_CHSEL_EN_MASK; val |= DA_CHSEL_EN(3); val &= ~DA_CHSEL_SEL_MASK; val |= DA_CHSEL_SEL(1); I2S_WRITE(sc, sc->cfg->txchsel, val); I2S_WRITE(sc, sc->cfg->rxchmap, 0x76543210); val = I2S_READ(sc, sc->cfg->rxchsel); val &= ~DA_CHSEL_EN_MASK; val |= DA_CHSEL_EN(3); val &= ~DA_CHSEL_SEL_MASK; val |= DA_CHSEL_SEL(1); I2S_WRITE(sc, sc->cfg->rxchsel, val); if (I2S_TYPE(sc) == SUNXI_I2S_SUN8I) { val = I2S_READ(sc, DA_CHCFG); val &= ~DA_CHCFG_TX_SLOT_NUM_MASK; val |= DA_CHCFG_TX_SLOT_NUM(1); val &= ~DA_CHCFG_RX_SLOT_NUM_MASK; val |= DA_CHCFG_RX_SLOT_NUM(1); I2S_WRITE(sc, DA_CHCFG, val); } return (0); } static int aw_i2s_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner I2S"); return (BUS_PROBE_DEFAULT); } static int aw_i2s_attach(device_t dev) { struct aw_i2s_softc *sc; int error; phandle_t node; hwreset_t rst; clk_t clk; sc = device_get_softc(dev); sc->dev = dev; sc->cfg = (void*)ofw_bus_search_compatible(dev, compat_data)->ocd_data; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, aw_i2s_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } error = clk_get_by_ofw_name(dev, 0, "mod", &sc->clk); if (error != 0) { device_printf(dev, "cannot get i2s_clk clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "apb", &clk); if (error != 0) { device_printf(dev, "cannot get APB clock\n"); goto fail; } error = clk_enable(clk); if (error != 0) { device_printf(dev, "cannot enable APB clock\n"); goto fail; } if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "cannot de-assert 
reset\n"); goto fail; } } aw_i2s_init(sc); node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); return (0); fail: aw_i2s_detach(dev); return (error); } static int aw_i2s_detach(device_t dev) { struct aw_i2s_softc *i2s; i2s = device_get_softc(dev); if (i2s->clk) clk_release(i2s->clk); if (i2s->intrhand != NULL) bus_teardown_intr(i2s->dev, i2s->res[1], i2s->intrhand); bus_release_resources(dev, aw_i2s_spec, i2s->res); mtx_destroy(&i2s->mtx); return (0); } static int aw_i2s_dai_init(device_t dev, uint32_t format) { struct aw_i2s_softc *sc; int fmt, pol; uint32_t ctl, fat0, chsel; u_int offset; sc = device_get_softc(dev); fmt = AUDIO_DAI_FORMAT_FORMAT(format); pol = AUDIO_DAI_FORMAT_POLARITY(format); ctl = I2S_READ(sc, DA_CTL); fat0 = I2S_READ(sc, DA_FAT0); if (I2S_TYPE(sc) == SUNXI_I2S_SUN4I) { fat0 &= ~DA_FAT0_FMT_MASK; switch (fmt) { case AUDIO_DAI_FORMAT_I2S: fat0 |= DA_FMT_I2S; break; case AUDIO_DAI_FORMAT_RJ: fat0 |= DA_FMT_RJ; break; case AUDIO_DAI_FORMAT_LJ: fat0 |= DA_FMT_LJ; break; default: return EINVAL; } ctl &= ~DA_CTL_PCM; } else { ctl &= ~DA_CTL_MODE_SEL_MASK; switch (fmt) { case AUDIO_DAI_FORMAT_I2S: ctl |= DA_CTL_MODE_SEL_LJ; offset = 1; break; case AUDIO_DAI_FORMAT_LJ: ctl |= DA_CTL_MODE_SEL_LJ; offset = 0; break; case AUDIO_DAI_FORMAT_RJ: ctl |= DA_CTL_MODE_SEL_RJ; offset = 0; break; case AUDIO_DAI_FORMAT_DSPA: ctl |= DA_CTL_MODE_SEL_PCM; offset = 1; break; case AUDIO_DAI_FORMAT_DSPB: ctl |= DA_CTL_MODE_SEL_PCM; offset = 0; break; default: return EINVAL; } chsel = I2S_READ(sc, sc->cfg->txchsel); chsel &= ~DA_CHSEL_OFFSET_MASK; chsel |= DA_CHSEL_OFFSET(offset); I2S_WRITE(sc, sc->cfg->txchsel, chsel); chsel = I2S_READ(sc, sc->cfg->rxchsel); chsel &= ~DA_CHSEL_OFFSET_MASK; chsel |= DA_CHSEL_OFFSET(offset); I2S_WRITE(sc, sc->cfg->rxchsel, chsel); } fat0 &= ~(DA_FAT0_LRCP_MASK|DA_FAT0_BCP_MASK); if (I2S_TYPE(sc) == SUNXI_I2S_SUN4I) { if (AUDIO_DAI_POLARITY_INVERTED_BCLK(pol)) fat0 |= DA_BCP_INVERTED; if 
(AUDIO_DAI_POLARITY_INVERTED_FRAME(pol)) fat0 |= DA_LRCP_INVERTED; } else { if (AUDIO_DAI_POLARITY_INVERTED_BCLK(pol)) fat0 |= DA_BCP_INVERTED; if (!AUDIO_DAI_POLARITY_INVERTED_FRAME(pol)) fat0 |= DA_LRCP_INVERTED; fat0 &= ~DA_FAT0_LRCK_PERIOD_MASK; fat0 |= DA_FAT0_LRCK_PERIOD(32 - 1); } I2S_WRITE(sc, DA_CTL, ctl); I2S_WRITE(sc, DA_FAT0, fat0); return (0); } static int aw_i2s_dai_intr(device_t dev, struct snd_dbuf *play_buf, struct snd_dbuf *rec_buf) { struct aw_i2s_softc *sc; int ret = 0; uint32_t val, status; sc = device_get_softc(dev); I2S_LOCK(sc); status = I2S_READ(sc, DA_ISTA); /* Clear interrupts */ // device_printf(sc->dev, "status: %08x\n", status); I2S_WRITE(sc, DA_ISTA, status); if (status & DA_ISTA_TXEI_INT) { uint8_t *samples; uint32_t count, size, readyptr, written, empty; val = I2S_READ(sc, DA_FSTA); empty = DA_FSTA_TXE_CNT(val); count = sndbuf_getready(play_buf); size = sndbuf_getsize(play_buf); readyptr = sndbuf_getreadyptr(play_buf); samples = (uint8_t*)sndbuf_getbuf(play_buf); written = 0; if (empty > count / 2) empty = count / 2; for (; empty > 0; empty--) { val = (samples[readyptr++ % size] << 16); val |= (samples[readyptr++ % size] << 24); written += 2; I2S_WRITE(sc, DA_TXFIFO, val); } sc->play_ptr += written; sc->play_ptr %= size; ret |= AUDIO_DAI_PLAY_INTR; } if (status & DA_ISTA_RXAI_INT) { uint8_t *samples; uint32_t count, size, freeptr, recorded, available; val = I2S_READ(sc, DA_FSTA); available = DA_FSTA_RXA_CNT(val); count = sndbuf_getfree(rec_buf); size = sndbuf_getsize(rec_buf); freeptr = sndbuf_getfreeptr(rec_buf); samples = (uint8_t*)sndbuf_getbuf(rec_buf); recorded = 0; if (available > count / 2) available = count / 2; for (; available > 0; available--) { val = I2S_READ(sc, DA_RXFIFO); samples[freeptr++ % size] = (val >> 16) & 0xff; samples[freeptr++ % size] = (val >> 24) & 0xff; recorded += 2; } sc->rec_ptr += recorded; sc->rec_ptr %= size; ret |= AUDIO_DAI_REC_INTR; } I2S_UNLOCK(sc); return (ret); } static struct pcmchan_caps * 
aw_i2s_dai_get_caps(device_t dev) { return (&aw_i2s_caps); } static int aw_i2s_dai_trigger(device_t dev, int go, int pcm_dir) { struct aw_i2s_softc *sc = device_get_softc(dev); uint32_t val; if ((pcm_dir != PCMDIR_PLAY) && (pcm_dir != PCMDIR_REC)) return (EINVAL); switch (go) { case PCMTRIG_START: if (pcm_dir == PCMDIR_PLAY) { /* Flush FIFO */ val = I2S_READ(sc, DA_FCTL); I2S_WRITE(sc, DA_FCTL, val | DA_FCTL_FTX); I2S_WRITE(sc, DA_FCTL, val & ~DA_FCTL_FTX); /* Reset TX sample counter */ I2S_WRITE(sc, DA_TXCNT, 0); /* Enable TX block */ val = I2S_READ(sc, DA_CTL); I2S_WRITE(sc, DA_CTL, val | DA_CTL_TXEN); /* Enable TX underrun interrupt */ val = I2S_READ(sc, DA_INT); I2S_WRITE(sc, DA_INT, val | DA_INT_TXEI_EN); } if (pcm_dir == PCMDIR_REC) { /* Flush FIFO */ val = I2S_READ(sc, DA_FCTL); I2S_WRITE(sc, DA_FCTL, val | DA_FCTL_FRX); I2S_WRITE(sc, DA_FCTL, val & ~DA_FCTL_FRX); /* Reset RX sample counter */ I2S_WRITE(sc, DA_RXCNT, 0); /* Enable RX block */ val = I2S_READ(sc, DA_CTL); I2S_WRITE(sc, DA_CTL, val | DA_CTL_RXEN); /* Enable RX data available interrupt */ val = I2S_READ(sc, DA_INT); I2S_WRITE(sc, DA_INT, val | DA_INT_RXAI_EN); } break; case PCMTRIG_STOP: case PCMTRIG_ABORT: I2S_LOCK(sc); if (pcm_dir == PCMDIR_PLAY) { /* Disable TX block */ val = I2S_READ(sc, DA_CTL); I2S_WRITE(sc, DA_CTL, val & ~DA_CTL_TXEN); /* Disable TX underrun interrupt */ val = I2S_READ(sc, DA_INT); I2S_WRITE(sc, DA_INT, val & ~DA_INT_TXEI_EN); sc->play_ptr = 0; } else { /* Disable RX block */ val = I2S_READ(sc, DA_CTL); I2S_WRITE(sc, DA_CTL, val & ~DA_CTL_RXEN); /* Disable RX data available interrupt */ val = I2S_READ(sc, DA_INT); I2S_WRITE(sc, DA_INT, val & ~DA_INT_RXAI_EN); sc->rec_ptr = 0; } I2S_UNLOCK(sc); break; } return (0); } static uint32_t aw_i2s_dai_get_ptr(device_t dev, int pcm_dir) { struct aw_i2s_softc *sc; uint32_t ptr; sc = device_get_softc(dev); I2S_LOCK(sc); if (pcm_dir == PCMDIR_PLAY) ptr = sc->play_ptr; else ptr = sc->rec_ptr; I2S_UNLOCK(sc); return ptr; } static int
aw_i2s_dai_setup_intr(device_t dev, driver_intr_t intr_handler, void *intr_arg) { struct aw_i2s_softc *sc = device_get_softc(dev); if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, intr_handler, intr_arg, &sc->intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } return (0); } static uint32_t aw_i2s_dai_set_chanformat(device_t dev, uint32_t format) { return (0); } static int aw_i2s_dai_set_sysclk(device_t dev, unsigned int rate, int dai_dir) { struct aw_i2s_softc *sc; int bclk_val, mclk_val; uint32_t val; int error; sc = device_get_softc(dev); error = clk_set_freq(sc->clk, AW_I2S_CLK_RATE, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(sc->dev, "couldn't set mod clock rate to %u Hz: %d\n", AW_I2S_CLK_RATE, error); return error; } error = clk_enable(sc->clk); if (error != 0) { device_printf(sc->dev, "couldn't enable mod clock: %d\n", error); return error; } const u_int bclk_prate = I2S_TYPE(sc) == SUNXI_I2S_SUN4I ? rate : AW_I2S_CLK_RATE; const u_int bclk_div = bclk_prate / (2 * 32 * AW_I2S_SAMPLE_RATE); const u_int mclk_div = AW_I2S_CLK_RATE / rate; if (I2S_TYPE(sc) == SUNXI_I2S_SUN4I) { bclk_val = sunxi_i2s_div_to_regval(sun4i_i2s_bclk_divmap, nitems(sun4i_i2s_bclk_divmap), bclk_div); mclk_val = sunxi_i2s_div_to_regval(sun4i_i2s_mclk_divmap, nitems(sun4i_i2s_mclk_divmap), mclk_div); } else { bclk_val = sunxi_i2s_div_to_regval(sun8i_i2s_divmap, nitems(sun8i_i2s_divmap), bclk_div); mclk_val = sunxi_i2s_div_to_regval(sun8i_i2s_divmap, nitems(sun8i_i2s_divmap), mclk_div); } if (bclk_val == -1 || mclk_val == -1) { device_printf(sc->dev, "couldn't configure bclk/mclk dividers\n"); return EIO; } val = I2S_READ(sc, DA_CLKD); if (I2S_TYPE(sc) == SUNXI_I2S_SUN4I) { val |= DA_CLKD_MCLKO_EN_SUN4I; val &= ~DA_CLKD_BCLKDIV_SUN4I_MASK; val |= DA_CLKD_BCLKDIV_SUN4I(bclk_val); } else { val |= DA_CLKD_MCLKO_EN_SUN8I; val &= ~DA_CLKD_BCLKDIV_SUN8I_MASK; val |= DA_CLKD_BCLKDIV_SUN8I(bclk_val); } val &= 
~DA_CLKD_MCLKDIV_MASK; val |= DA_CLKD_MCLKDIV(mclk_val); I2S_WRITE(sc, DA_CLKD, val); return (0); } static uint32_t aw_i2s_dai_set_chanspeed(device_t dev, uint32_t speed) { return (speed); } static device_method_t aw_i2s_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_i2s_probe), DEVMETHOD(device_attach, aw_i2s_attach), DEVMETHOD(device_detach, aw_i2s_detach), DEVMETHOD(audio_dai_init, aw_i2s_dai_init), DEVMETHOD(audio_dai_setup_intr, aw_i2s_dai_setup_intr), DEVMETHOD(audio_dai_set_sysclk, aw_i2s_dai_set_sysclk), DEVMETHOD(audio_dai_set_chanspeed, aw_i2s_dai_set_chanspeed), DEVMETHOD(audio_dai_set_chanformat, aw_i2s_dai_set_chanformat), DEVMETHOD(audio_dai_intr, aw_i2s_dai_intr), DEVMETHOD(audio_dai_get_caps, aw_i2s_dai_get_caps), DEVMETHOD(audio_dai_trigger, aw_i2s_dai_trigger), DEVMETHOD(audio_dai_get_ptr, aw_i2s_dai_get_ptr), DEVMETHOD_END }; static driver_t aw_i2s_driver = { "i2s", aw_i2s_methods, sizeof(struct aw_i2s_softc), }; DRIVER_MODULE(aw_i2s, simplebus, aw_i2s_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/aw_mmc.c b/sys/arm/allwinner/aw_mmc.c index 466ada8942c6..7c783f69880c 100644 --- a/sys/arm/allwinner/aw_mmc.c +++ b/sys/arm/allwinner/aw_mmc.c @@ -1,1520 +1,1520 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * Copyright (c) 2013 Alexander Fedorov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "opt_mmccam.h" #ifdef MMCCAM #include #include #include #include #include #include #include "mmc_sim_if.h" #endif #include "mmc_pwrseq_if.h" #define AW_MMC_MEMRES 0 #define AW_MMC_IRQRES 1 #define AW_MMC_RESSZ 2 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc)) #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS) #define AW_MMC_DMA_FTRGLEVEL 0x20070008 #define AW_MMC_RESET_RETRY 1000 #define CARD_ID_FREQUENCY 400000 struct aw_mmc_conf { uint32_t dma_xferlen; bool mask_data0; bool can_calibrate; bool new_timing; }; static const struct aw_mmc_conf a10_mmc_conf = { .dma_xferlen = 0x2000, }; static const struct aw_mmc_conf a13_mmc_conf = { .dma_xferlen = 0x10000, }; static const struct aw_mmc_conf a64_mmc_conf = { .dma_xferlen = 0x10000, .mask_data0 = true, .can_calibrate = true, .new_timing = true, }; static const struct aw_mmc_conf a64_emmc_conf = { .dma_xferlen = 0x2000, .can_calibrate = true, }; static struct ofw_compat_data compat_data[] 
= { {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf}, {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf}, {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf}, {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf}, {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf}, {NULL, 0} }; struct aw_mmc_softc { device_t aw_dev; clk_t aw_clk_ahb; clk_t aw_clk_mmc; hwreset_t aw_rst_ahb; int aw_bus_busy; int aw_resid; int aw_timeout; struct callout aw_timeoutc; struct mmc_host aw_host; struct mmc_helper mmc_helper; #ifdef MMCCAM union ccb * ccb; struct mmc_sim mmc_sim; #else struct mmc_request * aw_req; #endif struct mtx aw_mtx; struct resource * aw_res[AW_MMC_RESSZ]; struct aw_mmc_conf * aw_mmc_conf; uint32_t aw_intr; uint32_t aw_intr_wait; void * aw_intrhand; unsigned int aw_clock; device_t child; /* Fields required for DMA access. */ bus_addr_t aw_dma_desc_phys; bus_dmamap_t aw_dma_map; bus_dma_tag_t aw_dma_tag; void * aw_dma_desc; bus_dmamap_t aw_dma_buf_map; bus_dma_tag_t aw_dma_buf_tag; int aw_dma_map_err; }; static struct resource_spec aw_mmc_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static int aw_mmc_probe(device_t); static int aw_mmc_attach(device_t); static int aw_mmc_detach(device_t); static int aw_mmc_setup_dma(struct aw_mmc_softc *); static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc); static int aw_mmc_reset(struct aw_mmc_softc *); static int aw_mmc_init(struct aw_mmc_softc *); static void aw_mmc_intr(void *); static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t); static void aw_mmc_helper_cd_handler(device_t, bool); static void aw_mmc_print_error(uint32_t); static int aw_mmc_update_ios(device_t, device_t); static int aw_mmc_request(device_t, device_t, struct mmc_request *); #ifndef MMCCAM static int aw_mmc_get_ro(device_t, device_t); static int aw_mmc_acquire_host(device_t, device_t); static int aw_mmc_release_host(device_t, device_t); #endif #define 
AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx) #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx) #define AW_MMC_READ_4(_sc, _reg) \ bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg) #define AW_MMC_WRITE_4(_sc, _reg, _value) \ bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value) SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "aw_mmc driver"); static int aw_mmc_debug = 0; SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0, "Debug level bit0=card changes bit1=ios changes, bit2=interrupts, bit3=commands"); #define AW_MMC_DEBUG_CARD 0x1 #define AW_MMC_DEBUG_IOS 0x2 #define AW_MMC_DEBUG_INT 0x4 #define AW_MMC_DEBUG_CMD 0x8 #ifdef MMCCAM static int aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); cts->host_ocr = sc->aw_host.host_ocr; cts->host_f_min = sc->aw_host.f_min; cts->host_f_max = sc->aw_host.f_max; cts->host_caps = sc->aw_host.caps; cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios)); return (0); } static int aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct aw_mmc_softc *sc; struct mmc_ios *ios; struct mmc_ios *new_ios; sc = device_get_softc(dev); ios = &sc->aw_host.ios; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = new_ios->clock; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; if 
(__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode); } if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Timing => %d\n", ios->timing); } if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode); } return (aw_mmc_update_ios(sc->aw_dev, NULL)); } static int aw_mmc_cam_request(device_t dev, union ccb *ccb) { struct aw_mmc_softc *sc; struct ccb_mmcio *mmcio; sc = device_get_softc(dev); mmcio = &ccb->mmcio; AW_MMC_LOCK(sc); if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? 
mmcio->cmd.data->flags: 0); } if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } if (sc->ccb != NULL) { device_printf(sc->aw_dev, "Controller still has an active command\n"); return (EBUSY); } sc->ccb = ccb; /* aw_mmc_request locks again */ AW_MMC_UNLOCK(sc); aw_mmc_request(sc->aw_dev, NULL, NULL); return (0); } static void aw_mmc_cam_poll(device_t dev) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); aw_mmc_intr(sc); } #endif /* MMCCAM */ static void aw_mmc_helper_cd_handler(device_t dev, bool present) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); #ifdef MMCCAM mmc_cam_sim_discover(&sc->mmc_sim); #else AW_MMC_LOCK(sc); if (present) { if (sc->child == NULL) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Card inserted\n"); sc->child = device_add_child(sc->aw_dev, "mmc", -1); AW_MMC_UNLOCK(sc); if (sc->child) { device_set_ivars(sc->child, sc); (void)device_probe_and_attach(sc->child); } } else AW_MMC_UNLOCK(sc); } else { /* Card isn't present, detach if necessary */ if (sc->child != NULL) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Card removed\n"); AW_MMC_UNLOCK(sc); device_delete_child(sc->aw_dev, sc->child); sc->child = NULL; } else AW_MMC_UNLOCK(sc); } #endif /* MMCCAM */ } static int aw_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Integrated MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int aw_mmc_attach(device_t dev) { struct aw_mmc_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *tree; int error; sc = device_get_softc(dev); sc->aw_dev = dev; sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; #ifndef 
MMCCAM sc->aw_req = NULL; #endif if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES], INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc, &sc->aw_intrhand)) { bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc", MTX_DEF); callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0); /* De-assert reset */ if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) { error = hwreset_deassert(sc->aw_rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } /* Activate the module clock. */ error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } error = clk_enable(sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot get mmc clock\n"); goto fail; } error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(dev, "cannot init mmc clock\n"); goto fail; } error = clk_enable(sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot enable mmc clock\n"); goto fail; } sc->aw_timeout = 10; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW, &sc->aw_timeout, 0, "Request timeout in seconds"); /* Soft Reset controller. 
*/ if (aw_mmc_reset(sc) != 0) { device_printf(dev, "cannot reset the controller\n"); goto fail; } if (aw_mmc_setup_dma(sc) != 0) { device_printf(sc->aw_dev, "Couldn't setup DMA!\n"); goto fail; } /* Set some defaults for freq and supported mode */ sc->aw_host.f_min = 400000; sc->aw_host.f_max = 52000000; sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330; mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host); mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler); #ifdef MMCCAM sc->ccb = NULL; if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) { device_printf(dev, "cannot alloc cam sim\n"); goto fail; } #endif /* MMCCAM */ return (0); fail: callout_drain(&sc->aw_timeoutc); mtx_destroy(&sc->aw_mtx); bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand); bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); return (ENXIO); } static int aw_mmc_detach(device_t dev) { struct aw_mmc_softc *sc; device_t d; sc = device_get_softc(dev); clk_disable(sc->aw_clk_mmc); clk_disable(sc->aw_clk_ahb); hwreset_assert(sc->aw_rst_ahb); mmc_fdt_gpio_teardown(&sc->mmc_helper); callout_drain(&sc->aw_timeoutc); AW_MMC_LOCK(sc); d = sc->child; sc->child = NULL; AW_MMC_UNLOCK(sc); if (d != NULL) device_delete_child(sc->aw_dev, d); aw_mmc_teardown_dma(sc); mtx_destroy(&sc->aw_mtx); bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand); bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); #ifdef MMCCAM mmc_cam_sim_free(&sc->mmc_sim); #endif return (0); } static void aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; if (err) { sc->aw_dma_map_err = err; return; } sc->aw_dma_desc_phys = segs[0].ds_addr; } static int aw_mmc_setup_dma(struct aw_mmc_softc *sc) { int error; /* Allocate the DMA descriptor memory. 
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */ AW_MMC_DMA_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_tag); if (error) return (error); error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map); if (error) return (error); error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map, sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE, aw_dma_desc_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); /* Create the DMA map for data transfers. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */ sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_buf_tag); if (error) return (error); error = bus_dmamap_create(sc->aw_dma_buf_tag, 0, &sc->aw_dma_buf_map); if (error) return (error); return (0); } static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc) { bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map); bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map); if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0) device_printf(sc->aw_dev, "Cannot destroy the dma tag\n"); bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0) device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n"); } static void aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { int i; struct 
aw_mmc_dma_desc *dma_desc; struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; sc->aw_dma_map_err = err; if (err) return; dma_desc = sc->aw_dma_desc; for (i = 0; i < nsegs; i++) { if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen) dma_desc[i].buf_size = 0; /* Size of 0 indicate max len */ else dma_desc[i].buf_size = segs[i].ds_len; dma_desc[i].buf_addr = segs[i].ds_addr; dma_desc[i].config = AW_MMC_DMA_CONFIG_CH | AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC; dma_desc[i].next = sc->aw_dma_desc_phys + ((i + 1) * sizeof(struct aw_mmc_dma_desc)); } dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD; dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD | AW_MMC_DMA_CONFIG_ER; dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC; dma_desc[nsegs - 1].next = 0; } static int aw_mmc_prepare_dma(struct aw_mmc_softc *sc) { bus_dmasync_op_t sync_op; int error; struct mmc_command *cmd; uint32_t val; #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS)) return (EFBIG); error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); if (cmd->data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_PREWRITE; else sync_op = BUS_DMASYNC_PREREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE); /* Enable DMA */ val = AW_MMC_READ_4(sc, AW_MMC_GCTL); val &= ~AW_MMC_GCTL_FIFO_AC_MOD; val |= AW_MMC_GCTL_DMA_ENB; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); /* Reset DMA */ val |= AW_MMC_GCTL_DMA_RST; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST); /* Enable RX or TX DMA interrupt */ val = AW_MMC_READ_4(sc, AW_MMC_IDIE); if (cmd->data->flags & 
MMC_DATA_WRITE) val |= AW_MMC_IDST_TX_INT; else val |= AW_MMC_IDST_RX_INT; AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val); /* Set DMA descriptor list address */ AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys); /* FIFO trigger level */ AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL); return (0); } static int aw_mmc_reset(struct aw_mmc_softc *sc) { uint32_t reg; int timeout; reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_RESET; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); timeout = AW_MMC_RESET_RETRY; while (--timeout > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (timeout == 0) return (ETIMEDOUT); return (0); } static int aw_mmc_init(struct aw_mmc_softc *sc) { uint32_t reg; int ret; ret = aw_mmc_reset(sc); if (ret != 0) return (ret); /* Set the timeout. */ AW_MMC_WRITE_4(sc, AW_MMC_TMOR, AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) | AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK)); /* Clear the interrupt mask for now; aw_mmc_request() programs IMKR with the interrupts it needs. NOTE(review): assumes IMKR bit set == enabled, so 0 masks everything -- verify against the SoC manual. */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0); /* Clear pending interrupts. */ AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Debug register, undocumented */ AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb); /* Function select register */ AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000); AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff); /* Enable interrupts and disable AHB access. 
*/ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_INT_ENB; reg &= ~AW_MMC_GCTL_FIFO_AC_MOD; reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); return (0); } static void aw_mmc_req_done(struct aw_mmc_softc *sc) { struct mmc_command *cmd; #ifdef MMCCAM union ccb *ccb; #else struct mmc_request *req; #endif uint32_t val, mask; int retry; #ifdef MMCCAM ccb = sc->ccb; cmd = &ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error); } if (cmd->error != MMC_ERR_NONE) { /* Reset the FIFO and DMA engines. */ mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST; val = AW_MMC_READ_4(sc, AW_MMC_GCTL); AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask); retry = AW_MMC_RESET_RETRY; while (--retry > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (retry == 0) device_printf(sc->aw_dev, "timeout resetting DMA/FIFO\n"); aw_mmc_update_clock(sc, 1); } if (!dumping) callout_stop(&sc->aw_timeoutc); sc->aw_intr = 0; sc->aw_resid = 0; sc->aw_dma_map_err = 0; sc->aw_intr_wait = 0; #ifdef MMCCAM sc->ccb = NULL; ccb->ccb_h.status = (ccb->mmcio.cmd.error == 0 ? 
CAM_REQ_CMP : CAM_REQ_CMP_ERR); xpt_done(ccb); #else req = sc->aw_req; sc->aw_req = NULL; req->done(req); #endif } static void aw_mmc_req_ok(struct aw_mmc_softc *sc) { int timeout; struct mmc_command *cmd; uint32_t status; timeout = 1000; while (--timeout > 0) { status = AW_MMC_READ_4(sc, AW_MMC_STAR); if ((status & AW_MMC_STAR_CARD_BUSY) == 0) break; DELAY(1000); } #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (timeout == 0) { cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); return; } if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3); cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2); cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1); cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } else cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } /* All data has been transferred ? */ if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len) cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); } static inline void set_mmc_error(struct aw_mmc_softc *sc, int error_code) { #ifdef MMCCAM sc->ccb->mmcio.cmd.error = error_code; #else sc->aw_req->cmd->error = error_code; #endif } static void aw_mmc_timeout(void *arg) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; #ifdef MMCCAM if (sc->ccb != NULL) { #else if (sc->aw_req != NULL) { #endif device_printf(sc->aw_dev, "controller timeout\n"); set_mmc_error(sc, MMC_ERR_TIMEOUT); aw_mmc_req_done(sc); } else device_printf(sc->aw_dev, "Spurious timeout - no active request\n"); } static void aw_mmc_print_error(uint32_t err) { if(err & AW_MMC_INT_RESP_ERR) printf("AW_MMC_INT_RESP_ERR "); if (err & AW_MMC_INT_RESP_CRC_ERR) printf("AW_MMC_INT_RESP_CRC_ERR "); if (err & AW_MMC_INT_DATA_CRC_ERR) printf("AW_MMC_INT_DATA_CRC_ERR "); if (err & AW_MMC_INT_RESP_TIMEOUT) printf("AW_MMC_INT_RESP_TIMEOUT "); if (err & AW_MMC_INT_FIFO_RUN_ERR) printf("AW_MMC_INT_FIFO_RUN_ERR "); if (err & AW_MMC_INT_CMD_BUSY) printf("AW_MMC_INT_CMD_BUSY "); if 
(err & AW_MMC_INT_DATA_START_ERR) printf("AW_MMC_INT_DATA_START_ERR "); if (err & AW_MMC_INT_DATA_END_BIT_ERR) printf("AW_MMC_INT_DATA_END_BIT_ERR"); printf("\n"); } static void aw_mmc_intr(void *arg) { bus_dmasync_op_t sync_op; struct aw_mmc_softc *sc; struct mmc_data *data; uint32_t idst, imask, rint; sc = (struct aw_mmc_softc *)arg; AW_MMC_LOCK(sc); rint = AW_MMC_READ_4(sc, AW_MMC_RISR); idst = AW_MMC_READ_4(sc, AW_MMC_IDST); imask = AW_MMC_READ_4(sc, AW_MMC_IMKR); if (idst == 0 && imask == 0 && rint == 0) { AW_MMC_UNLOCK(sc); return; } if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) { device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n", idst, imask, rint); } #ifdef MMCCAM if (sc->ccb == NULL) { #else if (sc->aw_req == NULL) { #endif device_printf(sc->aw_dev, "Spurious interrupt - no active request, rint: 0x%08X\n", rint); aw_mmc_print_error(rint); goto end; } if (rint & AW_MMC_INT_ERR_BIT) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) { device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint); aw_mmc_print_error(rint); } if (rint & AW_MMC_INT_RESP_TIMEOUT) set_mmc_error(sc, MMC_ERR_TIMEOUT); else set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } if (idst & AW_MMC_IDST_ERROR) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst); set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } sc->aw_intr |= rint; #ifdef MMCCAM data = sc->ccb->mmcio.cmd.data; #else data = sc->aw_req->cmd->data; #endif if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) { if (data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_POSTWRITE; else sync_op = BUS_DMASYNC_POSTREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); sc->aw_resid = data->len >> 2; } if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait) 
aw_mmc_req_ok(sc); end: AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst); AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint); AW_MMC_UNLOCK(sc); } static int aw_mmc_request(device_t bus, device_t child, struct mmc_request *req) { int blksz; struct aw_mmc_softc *sc; struct mmc_command *cmd; uint32_t cmdreg, imask; int err; sc = device_get_softc(bus); AW_MMC_LOCK(sc); #ifdef MMCCAM KASSERT(req == NULL, ("req should be NULL in MMCCAM case!")); /* * For MMCCAM, sc->ccb has been NULL-checked and populated * by aw_mmc_cam_request() already. */ cmd = &sc->ccb->mmcio.cmd; #else if (sc->aw_req) { AW_MMC_UNLOCK(sc); return (EBUSY); } sc->aw_req = req; cmd = req->cmd; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", cmd->opcode, cmd->arg, cmd->flags, cmd->data != NULL ? (unsigned int)cmd->data->len : 0, cmd->data != NULL ? cmd->data->flags: 0); } #endif cmdreg = AW_MMC_CMDR_LOAD; imask = AW_MMC_INT_ERR_BIT; sc->aw_intr_wait = 0; sc->aw_intr = 0; sc->aw_resid = 0; cmd->error = MMC_ERR_NONE; if (cmd->opcode == MMC_GO_IDLE_STATE) cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= AW_MMC_CMDR_RESP_RCV; if (cmd->flags & MMC_RSP_136) cmdreg |= AW_MMC_CMDR_LONG_RESP; if (cmd->flags & MMC_RSP_CRC) cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC; if (cmd->data) { cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER; if (cmd->data->flags & MMC_DATA_MULTI) { cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG; imask |= AW_MMC_INT_AUTO_STOP_DONE; sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE; } else { sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER; imask |= AW_MMC_INT_DATA_OVER; } if (cmd->data->flags & MMC_DATA_WRITE) cmdreg |= AW_MMC_CMDR_DIR_WRITE; #ifdef MMCCAM if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) { AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size); AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len); } else #endif { blksz = min(cmd->data->len, MMC_SECTOR_SIZE); AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz); 
AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len); } } else { imask |= AW_MMC_INT_CMD_DONE; } /* Enable the interrupts we are interested in */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask); AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Enable auto stop if needed */ AW_MMC_WRITE_4(sc, AW_MMC_A12A, cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff); /* Write the command argument */ AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg); /* * If we don't have data start the request * if we do prepare the dma request and start the request */ if (cmd->data == NULL) { AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } else { err = aw_mmc_prepare_dma(sc); if (err != 0) device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err); AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } if (!dumping) { callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz, aw_mmc_timeout, sc); } AW_MMC_UNLOCK(sc); return (0); } static int aw_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->aw_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->aw_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->aw_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->aw_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->aw_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->aw_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->aw_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->aw_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->aw_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->aw_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->aw_host.ios.vdd; break; case MMCBR_IVAR_VCCQ: *(int *)result = sc->aw_host.ios.vccq; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->aw_host.caps; break; 
case MMCBR_IVAR_TIMING: *(int *)result = sc->aw_host.ios.timing; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; break; case MMCBR_IVAR_RETUNE_REQ: *(int *)result = retune_req_none; break; } return (0); } static int aw_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->aw_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->aw_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->aw_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->aw_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->aw_host.mode = value; break; case MMCBR_IVAR_OCR: sc->aw_host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->aw_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->aw_host.ios.vdd = value; break; case MMCBR_IVAR_VCCQ: sc->aw_host.ios.vccq = value; break; case MMCBR_IVAR_TIMING: sc->aw_host.ios.timing = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon) { uint32_t reg; int retry; reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER | AW_MMC_CKCR_MASK_DATA0); if (clkon) reg |= AW_MMC_CKCR_ENB; if (sc->aw_mmc_conf->mask_data0) reg |= AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK | AW_MMC_CMDR_WAIT_PRE_OVER; AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg); retry = 0xfffff; while (reg & AW_MMC_CMDR_LOAD && --retry > 0) { reg = AW_MMC_READ_4(sc, AW_MMC_CMDR); DELAY(10); } AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); if (reg & AW_MMC_CMDR_LOAD) { device_printf(sc->aw_dev, "timeout updating clock\n"); return (ETIMEDOUT); } if 
(sc->aw_mmc_conf->mask_data0) { reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); } return (0); } #ifndef MMCCAM static int aw_mmc_switch_vccq(device_t bus, device_t child) { struct aw_mmc_softc *sc; int uvolt, err; sc = device_get_softc(bus); if (sc->mmc_helper.vqmmc_supply == NULL) return EOPNOTSUPP; switch (sc->aw_host.ios.vccq) { case vccq_180: uvolt = 1800000; break; case vccq_330: uvolt = 3300000; break; default: return EINVAL; } err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt); if (err != 0) { device_printf(sc->aw_dev, "Cannot set vqmmc to %d<->%d\n", uvolt, uvolt); return (err); } return (0); } #endif static int aw_mmc_update_ios(device_t bus, device_t child) { int error; struct aw_mmc_softc *sc; struct mmc_ios *ios; unsigned int clock; uint32_t reg, div = 1; int reg_status; int rv; sc = device_get_softc(bus); ios = &sc->aw_host.ios; /* Set the bus width. */ switch (ios->bus_width) { case bus_width_1: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1); break; case bus_width_4: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4); break; case bus_width_8: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8); break; } switch (ios->power_mode) { case power_on: break; case power_off: if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Powering down sd/mmc\n"); if (sc->mmc_helper.vmmc_supply) { rv = regulator_status(sc->mmc_helper.vmmc_supply, ®_status); if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED) regulator_disable(sc->mmc_helper.vmmc_supply); } if (sc->mmc_helper.vqmmc_supply) { rv = regulator_status(sc->mmc_helper.vqmmc_supply, ®_status); if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED) regulator_disable(sc->mmc_helper.vqmmc_supply); } if (sc->mmc_helper.mmc_pwrseq) MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false); aw_mmc_reset(sc); break; case power_up: if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Powering up 
sd/mmc\n"); if (sc->mmc_helper.vmmc_supply) { rv = regulator_status(sc->mmc_helper.vmmc_supply, ®_status); if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED) regulator_enable(sc->mmc_helper.vmmc_supply); } if (sc->mmc_helper.vqmmc_supply) { rv = regulator_status(sc->mmc_helper.vqmmc_supply, ®_status); if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED) regulator_enable(sc->mmc_helper.vqmmc_supply); } if (sc->mmc_helper.mmc_pwrseq) MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true); aw_mmc_init(sc); break; }; /* Enable ddr mode if needed */ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52) reg |= AW_MMC_GCTL_DDR_MOD_SEL; else reg &= ~AW_MMC_GCTL_DDR_MOD_SEL; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); if (ios->clock && ios->clock != sc->aw_clock) { sc->aw_clock = clock = ios->clock; /* Disable clock */ error = aw_mmc_update_clock(sc, 0); if (error != 0) return (error); if (ios->timing == bus_timing_mmc_ddr52 && (sc->aw_mmc_conf->new_timing || ios->bus_width == bus_width_8)) { div = 2; clock <<= 1; } /* Reset the divider. */ reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_DIV; reg |= div - 1; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); /* New timing mode if needed */ if (sc->aw_mmc_conf->new_timing) { reg = AW_MMC_READ_4(sc, AW_MMC_NTSR); reg |= AW_MMC_NTSR_MODE_SELECT; AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg); } /* Set the MMC clock. 
*/
	/*
	 * Gate the module clock while reprogramming its frequency, then
	 * re-enable it.  Disable/enable failures are only reported (with
	 * bootverbose); a set_freq failure aborts the ios update.
	 */
	error = clk_disable(sc->aw_clk_mmc);
	if (error != 0 && bootverbose)
		device_printf(sc->aw_dev,
		    "failed to disable mmc clock: %d\n", error);
	error = clk_set_freq(sc->aw_clk_mmc, clock,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(sc->aw_dev,
		    "failed to set frequency to %u Hz: %d\n",
		    clock, error);
		return (error);
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0 && bootverbose)
		device_printf(sc->aw_dev,
		    "failed to re-enable mmc clock: %d\n", error);

	/* Enable software sample-delay calibration where supported. */
	if (sc->aw_mmc_conf->can_calibrate)
		AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);

	/* Enable clock. */
	error = aw_mmc_update_clock(sc, 1);
	if (error != 0)
		return (error);
	}

	return (0);
}

#ifndef MMCCAM
/* mmcbr_get_ro: report the write-protect switch state via the FDT GPIO helper. */
static int
aw_mmc_get_ro(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);

	return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
}

/*
 * mmcbr_acquire_host: grab exclusive use of the controller, sleeping
 * (interruptibly, PCATCH) while another caller holds it.  Returns the
 * msleep error (e.g. EINTR) without taking ownership on failure.
 */
static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

/* mmcbr_release_host: drop ownership and wake any acquire_host sleepers. */
static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}
#endif

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, aw_mmc_probe),
	DEVMETHOD(device_attach, aw_mmc_attach),
	DEVMETHOD(device_detach, aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
	DEVMETHOD(bus_add_child, bus_generic_add_child),

#ifndef MMCCAM
	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request, aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
	DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
	DEVMETHOD(mmcbr_acquire_host,
aw_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, aw_mmc_release_host), #endif #ifdef MMCCAM /* MMCCAM interface */ DEVMETHOD(mmc_sim_get_tran_settings, aw_mmc_get_tran_settings), DEVMETHOD(mmc_sim_set_tran_settings, aw_mmc_set_tran_settings), DEVMETHOD(mmc_sim_cam_request, aw_mmc_cam_request), DEVMETHOD(mmc_sim_cam_poll, aw_mmc_cam_poll), #endif DEVMETHOD_END }; static driver_t aw_mmc_driver = { "aw_mmc", aw_mmc_methods, sizeof(struct aw_mmc_softc), }; DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL); #ifndef MMCCAM MMC_DECLARE_BRIDGE(aw_mmc); #endif SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/aw_rsb.c b/sys/arm/allwinner/aw_rsb.c index 40627ebf8448..08522caff725 100644 --- a/sys/arm/allwinner/aw_rsb.c +++ b/sys/arm/allwinner/aw_rsb.c @@ -1,505 +1,505 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner RSB (Reduced Serial Bus) and P2WI (Push-Pull Two Wire Interface) */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "iicbus_if.h" #define RSB_CTRL 0x00 #define START_TRANS (1 << 7) #define GLOBAL_INT_ENB (1 << 1) #define SOFT_RESET (1 << 0) #define RSB_CCR 0x04 #define RSB_INTE 0x08 #define RSB_INTS 0x0c #define INT_TRANS_ERR_ID(x) (((x) >> 8) & 0xf) #define INT_LOAD_BSY (1 << 2) #define INT_TRANS_ERR (1 << 1) #define INT_TRANS_OVER (1 << 0) #define INT_MASK (INT_LOAD_BSY|INT_TRANS_ERR|INT_TRANS_OVER) #define RSB_DADDR0 0x10 #define RSB_DADDR1 0x14 #define RSB_DLEN 0x18 #define DLEN_READ (1 << 4) #define RSB_DATA0 0x1c #define RSB_DATA1 0x20 #define RSB_PMCR 0x28 #define RSB_PMCR_START (1 << 31) #define RSB_PMCR_DATA(x) (x << 16) #define RSB_PMCR_REG(x) (x << 8) #define RSB_CMD 0x2c #define CMD_SRTA 0xe8 #define CMD_RD8 0x8b #define CMD_RD16 0x9c #define CMD_RD32 0xa6 #define CMD_WR8 0x4e #define CMD_WR16 0x59 #define CMD_WR32 0x63 #define RSB_DAR 0x30 #define DAR_RTA (0xff << 16) #define DAR_RTA_SHIFT 16 #define DAR_DA (0xffff << 0) #define DAR_DA_SHIFT 0 #define RSB_MAXLEN 8 #define RSB_RESET_RETRY 100 #define RSB_I2C_TIMEOUT hz #define RSB_ADDR_PMIC_PRIMARY 0x3a3 #define RSB_ADDR_PMIC_SECONDARY 0x745 #define RSB_ADDR_PERIPH_IC 0xe89 #define PMIC_MODE_REG 0x3e #define PMIC_MODE_I2C 0x00 #define PMIC_MODE_RSB 0x7c #define A31_P2WI 1 #define 
A23_RSB 2 static struct ofw_compat_data compat_data[] = { { "allwinner,sun6i-a31-p2wi", A31_P2WI }, { "allwinner,sun8i-a23-rsb", A23_RSB }, { NULL, 0 } }; static struct resource_spec rsb_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; /* * Device address to Run-time address mappings. * * Run-time address (RTA) is an 8-bit value used to address the device during * a read or write transaction. The following are valid RTAs: * 0x17 0x2d 0x3a 0x4e 0x59 0x63 0x74 0x8b 0x9c 0xa6 0xb1 0xc5 0xd2 0xe8 0xff * * Allwinner uses RTA 0x2d for the primary PMIC, 0x3a for the secondary PMIC, * and 0x4e for the peripheral IC (where applicable). */ static const struct { uint16_t addr; uint8_t rta; } rsb_rtamap[] = { { .addr = RSB_ADDR_PMIC_PRIMARY, .rta = 0x2d }, { .addr = RSB_ADDR_PMIC_SECONDARY, .rta = 0x3a }, { .addr = RSB_ADDR_PERIPH_IC, .rta = 0x4e }, { .addr = 0, .rta = 0 } }; struct rsb_softc { struct resource *res; struct mtx mtx; clk_t clk; hwreset_t rst; device_t iicbus; int busy; uint32_t status; uint16_t cur_addr; int type; struct iic_msg *msg; }; #define RSB_LOCK(sc) mtx_lock(&(sc)->mtx) #define RSB_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RSB_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define RSB_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define RSB_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static phandle_t rsb_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static int rsb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct rsb_softc *sc; int retry; sc = device_get_softc(dev); RSB_LOCK(sc); /* Write soft-reset bit and wait for it to self-clear. 
*/ RSB_WRITE(sc, RSB_CTRL, SOFT_RESET); for (retry = RSB_RESET_RETRY; retry > 0; retry--) if ((RSB_READ(sc, RSB_CTRL) & SOFT_RESET) == 0) break; RSB_UNLOCK(sc); if (retry == 0) { device_printf(dev, "soft reset timeout\n"); return (ETIMEDOUT); } return (IIC_ENOADDR); } static uint32_t rsb_encode(const uint8_t *buf, u_int len, u_int off) { uint32_t val; u_int n; val = 0; for (n = off; n < MIN(len, 4 + off); n++) val |= ((uint32_t)buf[n] << ((n - off) * NBBY)); return val; } static void rsb_decode(const uint32_t val, uint8_t *buf, u_int len, u_int off) { u_int n; for (n = off; n < MIN(len, 4 + off); n++) buf[n] = (val >> ((n - off) * NBBY)) & 0xff; } static int rsb_start(device_t dev) { struct rsb_softc *sc; int error, retry; sc = device_get_softc(dev); RSB_ASSERT_LOCKED(sc); /* Start the transfer */ RSB_WRITE(sc, RSB_CTRL, GLOBAL_INT_ENB | START_TRANS); /* Wait for transfer to complete */ error = ETIMEDOUT; for (retry = RSB_I2C_TIMEOUT; retry > 0; retry--) { sc->status |= RSB_READ(sc, RSB_INTS); if ((sc->status & INT_TRANS_OVER) != 0) { error = 0; break; } DELAY((1000 * hz) / RSB_I2C_TIMEOUT); } if (error == 0 && (sc->status & INT_TRANS_OVER) == 0) { device_printf(dev, "transfer error, status 0x%08x\n", sc->status); error = EIO; } return (error); } static int rsb_set_rta(device_t dev, uint16_t addr) { struct rsb_softc *sc; uint8_t rta; int i; sc = device_get_softc(dev); RSB_ASSERT_LOCKED(sc); /* Lookup run-time address for given device address */ for (rta = 0, i = 0; rsb_rtamap[i].rta != 0; i++) if (rsb_rtamap[i].addr == addr) { rta = rsb_rtamap[i].rta; break; } if (rta == 0) { device_printf(dev, "RTA not known for address %#x\n", addr); return (ENXIO); } /* Set run-time address */ RSB_WRITE(sc, RSB_INTS, RSB_READ(sc, RSB_INTS)); RSB_WRITE(sc, RSB_DAR, (addr << DAR_DA_SHIFT) | (rta << DAR_RTA_SHIFT)); RSB_WRITE(sc, RSB_CMD, CMD_SRTA); return (rsb_start(dev)); } static int rsb_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct rsb_softc *sc; 
uint32_t daddr[2], data[2], dlen; uint16_t device_addr; uint8_t cmd; int error; sc = device_get_softc(dev); /* * P2WI and RSB are not really I2C or SMBus controllers, so there are * some restrictions imposed by the driver. * * Transfers must contain exactly two messages. The first is always * a write, containing a single data byte offset. Data will either * be read from or written to the corresponding data byte in the * second message. The slave address in both messages must be the * same. */ if (nmsgs != 2 || (msgs[0].flags & IIC_M_RD) == IIC_M_RD || (msgs[0].slave >> 1) != (msgs[1].slave >> 1) || msgs[0].len != 1 || msgs[1].len > RSB_MAXLEN) return (EINVAL); /* The RSB controller can read or write 1, 2, or 4 bytes at a time. */ if (sc->type == A23_RSB) { if ((msgs[1].flags & IIC_M_RD) != 0) { switch (msgs[1].len) { case 1: cmd = CMD_RD8; break; case 2: cmd = CMD_RD16; break; case 4: cmd = CMD_RD32; break; default: return (EINVAL); } } else { switch (msgs[1].len) { case 1: cmd = CMD_WR8; break; case 2: cmd = CMD_WR16; break; case 4: cmd = CMD_WR32; break; default: return (EINVAL); } } } RSB_LOCK(sc); while (sc->busy) mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", 0); sc->busy = 1; sc->status = 0; /* Select current run-time address if necessary */ if (sc->type == A23_RSB) { device_addr = msgs[0].slave >> 1; if (sc->cur_addr != device_addr) { error = rsb_set_rta(dev, device_addr); if (error != 0) goto done; sc->cur_addr = device_addr; sc->status = 0; } } /* Clear interrupt status */ RSB_WRITE(sc, RSB_INTS, RSB_READ(sc, RSB_INTS)); /* Program data access address registers */ daddr[0] = rsb_encode(msgs[0].buf, msgs[0].len, 0); RSB_WRITE(sc, RSB_DADDR0, daddr[0]); /* Write data */ if ((msgs[1].flags & IIC_M_RD) == 0) { data[0] = rsb_encode(msgs[1].buf, msgs[1].len, 0); RSB_WRITE(sc, RSB_DATA0, data[0]); } /* Set command type for RSB */ if (sc->type == A23_RSB) RSB_WRITE(sc, RSB_CMD, cmd); /* Program data length register and transfer direction */ dlen = msgs[0].len - 1; if 
    ((msgs[1].flags & IIC_M_RD) == IIC_M_RD)
		dlen |= DLEN_READ;
	RSB_WRITE(sc, RSB_DLEN, dlen);

	/* Start transfer */
	error = rsb_start(dev);
	if (error != 0)
		goto done;

	/* Read data */
	if ((msgs[1].flags & IIC_M_RD) == IIC_M_RD) {
		data[0] = RSB_READ(sc, RSB_DATA0);
		rsb_decode(data[0], msgs[1].buf, msgs[1].len, 0);
	}

done:
	/* Release the bus and wake any thread waiting in rsb_transfer(). */
	sc->msg = NULL;
	sc->busy = 0;
	wakeup(sc);
	RSB_UNLOCK(sc);

	return (error);
}

/* Match the FDT node against the supported P2WI/RSB controller variants. */
static int
rsb_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
	case A23_RSB:
		device_set_desc(dev, "Allwinner RSB");
		break;
	case A31_P2WI:
		device_set_desc(dev, "Allwinner P2WI");
		break;
	default:
		return (ENXIO);
	}

	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: enable the controller's clock and de-assert its reset (both
 * optional in the FDT), map registers, switch the PMIC to RSB mode, and
 * create the child iicbus.  On any failure all acquired resources are
 * released via the 'fail' path.
 */
static int
rsb_attach(device_t dev)
{
	struct rsb_softc *sc;
	int error;

	sc = device_get_softc(dev);
	mtx_init(&sc->mtx, device_get_nameunit(dev), "rsb", MTX_DEF);

	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (clk_get_by_ofw_index(dev, 0, 0, &sc->clk) == 0) {
		error = clk_enable(sc->clk);
		if (error != 0) {
			device_printf(dev, "cannot enable clock\n");
			goto fail;
		}
	}
	if (hwreset_get_by_ofw_idx(dev, 0, 0, &sc->rst) == 0) {
		error = hwreset_deassert(sc->rst);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	if (bus_alloc_resources(dev, rsb_spec, &sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	/* Set the PMIC into RSB mode as ATF might have left it in I2C mode */
	RSB_WRITE(sc, RSB_PMCR,
	    RSB_PMCR_REG(PMIC_MODE_REG) | RSB_PMCR_DATA(PMIC_MODE_RSB) |
	    RSB_PMCR_START);

	sc->iicbus = device_add_child(dev, "iicbus", -1);
	if (sc->iicbus == NULL) {
		device_printf(dev, "cannot add iicbus child device\n");
		error = ENXIO;
		goto fail;
	}

	bus_generic_attach(dev);

	return (0);

fail:
	bus_release_resources(dev, rsb_spec, &sc->res);
	if (sc->rst != NULL)
		hwreset_release(sc->rst);
	if (sc->clk != NULL)
		clk_release(sc->clk);
	mtx_destroy(&sc->mtx);

	return (error);
}

static
device_method_t rsb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rsb_probe), DEVMETHOD(device_attach, rsb_attach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, rsb_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, rsb_reset), DEVMETHOD(iicbus_transfer, rsb_transfer), DEVMETHOD_END }; static driver_t rsb_driver = { "iichb", rsb_methods, sizeof(struct rsb_softc), }; EARLY_DRIVER_MODULE(iicbus, rsb, iicbus_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(rsb, simplebus, rsb_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(rsb, 1); MODULE_DEPEND(rsb, iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/aw_rtc.c b/sys/arm/allwinner/aw_rtc.c index 92bb73efc1f5..9938601f17ce 100644 --- a/sys/arm/allwinner/aw_rtc.c +++ b/sys/arm/allwinner/aw_rtc.c @@ -1,363 +1,363 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * Copyright (c) 2016 Vladimir Belian * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "clock_if.h" #define LOSC_CTRL_REG 0x00 #define A10_RTC_DATE_REG 0x04 #define A10_RTC_TIME_REG 0x08 #define A31_LOSC_AUTO_SWT_STA 0x04 #define A31_RTC_DATE_REG 0x10 #define A31_RTC_TIME_REG 0x14 #define TIME_MASK 0x001f3f3f #define LOSC_OSC_SRC (1 << 0) #define LOSC_GSM (1 << 3) #define LOSC_AUTO_SW_EN (1 << 14) #define LOSC_MAGIC 0x16aa0000 #define LOSC_BUSY_MASK 0x00000380 #define IS_SUN7I (sc->conf->is_a20 == true) #define YEAR_MIN (IS_SUN7I ? 1970 : 2010) #define YEAR_MAX (IS_SUN7I ? 2100 : 2073) #define YEAR_OFFSET (IS_SUN7I ? 1900 : 2010) #define YEAR_MASK (IS_SUN7I ? 0xff : 0x3f) #define LEAP_BIT (IS_SUN7I ? 
24 : 22) #define GET_SEC_VALUE(x) ((x) & 0x0000003f) #define GET_MIN_VALUE(x) (((x) & 0x00003f00) >> 8) #define GET_HOUR_VALUE(x) (((x) & 0x001f0000) >> 16) #define GET_DAY_VALUE(x) ((x) & 0x0000001f) #define GET_MON_VALUE(x) (((x) & 0x00000f00) >> 8) #define GET_YEAR_VALUE(x) (((x) >> 16) & YEAR_MASK) #define SET_DAY_VALUE(x) GET_DAY_VALUE(x) #define SET_MON_VALUE(x) (((x) & 0x0000000f) << 8) #define SET_YEAR_VALUE(x) (((x) & YEAR_MASK) << 16) #define SET_LEAP_VALUE(x) (((x) & 0x00000001) << LEAP_BIT) #define SET_SEC_VALUE(x) GET_SEC_VALUE(x) #define SET_MIN_VALUE(x) (((x) & 0x0000003f) << 8) #define SET_HOUR_VALUE(x) (((x) & 0x0000001f) << 16) #define HALF_OF_SEC_NS 500000000 #define RTC_RES_US 1000000 #define RTC_TIMEOUT 70 #define RTC_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define RTC_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) #define IS_LEAP_YEAR(y) (((y) % 400) == 0 || (((y) % 100) != 0 && ((y) % 4) == 0)) struct aw_rtc_conf { uint64_t iosc_freq; bus_size_t rtc_date; bus_size_t rtc_time; bus_size_t rtc_losc_sta; bool is_a20; }; struct aw_rtc_conf a10_conf = { .rtc_date = A10_RTC_DATE_REG, .rtc_time = A10_RTC_TIME_REG, .rtc_losc_sta = LOSC_CTRL_REG, }; struct aw_rtc_conf a20_conf = { .rtc_date = A10_RTC_DATE_REG, .rtc_time = A10_RTC_TIME_REG, .rtc_losc_sta = LOSC_CTRL_REG, .is_a20 = true, }; struct aw_rtc_conf a31_conf = { .iosc_freq = 650000, /* between 600 and 700 Khz */ .rtc_date = A31_RTC_DATE_REG, .rtc_time = A31_RTC_TIME_REG, .rtc_losc_sta = A31_LOSC_AUTO_SWT_STA, }; struct aw_rtc_conf h3_conf = { .iosc_freq = 16000000, .rtc_date = A31_RTC_DATE_REG, .rtc_time = A31_RTC_TIME_REG, .rtc_losc_sta = A31_LOSC_AUTO_SWT_STA, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun4i-a10-rtc", (uintptr_t) &a10_conf }, { "allwinner,sun7i-a20-rtc", (uintptr_t) &a20_conf }, { "allwinner,sun6i-a31-rtc", (uintptr_t) &a31_conf }, { "allwinner,sun8i-h3-rtc", (uintptr_t) &h3_conf }, { "allwinner,sun50i-h5-rtc", (uintptr_t) &h3_conf }, { 
"allwinner,sun50i-h6-rtc", (uintptr_t) &h3_conf }, { NULL, 0 } }; struct aw_rtc_softc { struct resource *res; struct aw_rtc_conf *conf; int type; }; static struct clk_fixed_def aw_rtc_osc32k = { .clkdef.id = 0, .freq = 32768, }; static struct clk_fixed_def aw_rtc_iosc = { .clkdef.id = 2, }; static void aw_rtc_install_clocks(struct aw_rtc_softc *sc, device_t dev); static int aw_rtc_probe(device_t dev); static int aw_rtc_attach(device_t dev); static int aw_rtc_detach(device_t dev); static int aw_rtc_gettime(device_t dev, struct timespec *ts); static int aw_rtc_settime(device_t dev, struct timespec *ts); static device_method_t aw_rtc_methods[] = { DEVMETHOD(device_probe, aw_rtc_probe), DEVMETHOD(device_attach, aw_rtc_attach), DEVMETHOD(device_detach, aw_rtc_detach), DEVMETHOD(clock_gettime, aw_rtc_gettime), DEVMETHOD(clock_settime, aw_rtc_settime), DEVMETHOD_END }; static driver_t aw_rtc_driver = { "rtc", aw_rtc_methods, sizeof(struct aw_rtc_softc), }; EARLY_DRIVER_MODULE(aw_rtc, simplebus, aw_rtc_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_FIRST); MODULE_VERSION(aw_rtc, 1); SIMPLEBUS_PNP_INFO(compat_data); static int aw_rtc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner RTC"); return (BUS_PROBE_DEFAULT); } static int aw_rtc_attach(device_t dev) { struct aw_rtc_softc *sc = device_get_softc(dev); uint32_t val; int rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->res) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->conf = (struct aw_rtc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; val = RTC_READ(sc, LOSC_CTRL_REG); val |= LOSC_AUTO_SW_EN; val |= LOSC_MAGIC | LOSC_GSM | LOSC_OSC_SRC; RTC_WRITE(sc, LOSC_CTRL_REG, val); DELAY(100); if (bootverbose) { val = RTC_READ(sc, sc->conf->rtc_losc_sta); if ((val & LOSC_OSC_SRC) == 0) device_printf(dev, "Using 
internal oscillator\n"); else device_printf(dev, "Using external oscillator\n"); } aw_rtc_install_clocks(sc, dev); clock_register(dev, RTC_RES_US); return (0); } static int aw_rtc_detach(device_t dev) { /* can't support detach, since there's no clock_unregister function */ return (EBUSY); } static void aw_rtc_install_clocks(struct aw_rtc_softc *sc, device_t dev) { struct clkdom *clkdom; const char **clknames; phandle_t node; int nclocks; node = ofw_bus_get_node(dev); nclocks = ofw_bus_string_list_to_array(node, "clock-output-names", &clknames); /* No clocks to export */ if (nclocks <= 0) return; if (nclocks != 3) { device_printf(dev, "Having only %d clocks instead of 3, aborting\n", nclocks); return; } clkdom = clkdom_create(dev); aw_rtc_osc32k.clkdef.name = clknames[0]; if (clknode_fixed_register(clkdom, &aw_rtc_osc32k) != 0) device_printf(dev, "Cannot register osc32k clock\n"); aw_rtc_iosc.clkdef.name = clknames[2]; aw_rtc_iosc.freq = sc->conf->iosc_freq; if (clknode_fixed_register(clkdom, &aw_rtc_iosc) != 0) device_printf(dev, "Cannot register iosc clock\n"); clkdom_finit(clkdom); if (bootverbose) clkdom_dump(clkdom); } static int aw_rtc_gettime(device_t dev, struct timespec *ts) { struct aw_rtc_softc *sc = device_get_softc(dev); struct clocktime ct; uint32_t rdate, rtime; rdate = RTC_READ(sc, sc->conf->rtc_date); rtime = RTC_READ(sc, sc->conf->rtc_time); if ((rtime & TIME_MASK) == 0) rdate = RTC_READ(sc, sc->conf->rtc_date); ct.sec = GET_SEC_VALUE(rtime); ct.min = GET_MIN_VALUE(rtime); ct.hour = GET_HOUR_VALUE(rtime); ct.day = GET_DAY_VALUE(rdate); ct.mon = GET_MON_VALUE(rdate); ct.year = GET_YEAR_VALUE(rdate) + YEAR_OFFSET; ct.dow = -1; /* RTC resolution is 1 sec */ ct.nsec = 0; return (clock_ct_to_ts(&ct, ts)); } static int aw_rtc_settime(device_t dev, struct timespec *ts) { struct aw_rtc_softc *sc = device_get_softc(dev); struct clocktime ct; uint32_t clk, rdate, rtime; /* RTC resolution is 1 sec */ if (ts->tv_nsec >= HALF_OF_SEC_NS) ts->tv_sec++; 
ts->tv_nsec = 0; clock_ts_to_ct(ts, &ct); if ((ct.year < YEAR_MIN) || (ct.year > YEAR_MAX)) { device_printf(dev, "could not set time, year out of range\n"); return (EINVAL); } for (clk = 0; RTC_READ(sc, LOSC_CTRL_REG) & LOSC_BUSY_MASK; clk++) { if (clk > RTC_TIMEOUT) { device_printf(dev, "could not set time, RTC busy\n"); return (EINVAL); } DELAY(1); } /* reset time register to avoid unexpected date increment */ RTC_WRITE(sc, sc->conf->rtc_time, 0); rdate = SET_DAY_VALUE(ct.day) | SET_MON_VALUE(ct.mon) | SET_YEAR_VALUE(ct.year - YEAR_OFFSET) | SET_LEAP_VALUE(IS_LEAP_YEAR(ct.year)); rtime = SET_SEC_VALUE(ct.sec) | SET_MIN_VALUE(ct.min) | SET_HOUR_VALUE(ct.hour); for (clk = 0; RTC_READ(sc, LOSC_CTRL_REG) & LOSC_BUSY_MASK; clk++) { if (clk > RTC_TIMEOUT) { device_printf(dev, "could not set date, RTC busy\n"); return (EINVAL); } DELAY(1); } RTC_WRITE(sc, sc->conf->rtc_date, rdate); for (clk = 0; RTC_READ(sc, LOSC_CTRL_REG) & LOSC_BUSY_MASK; clk++) { if (clk > RTC_TIMEOUT) { device_printf(dev, "could not set time, RTC busy\n"); return (EINVAL); } DELAY(1); } RTC_WRITE(sc, sc->conf->rtc_time, rtime); DELAY(RTC_TIMEOUT); return (0); } diff --git a/sys/arm/allwinner/aw_thermal.c b/sys/arm/allwinner/aw_thermal.c index 99e302571220..4f1e02612347 100644 --- a/sys/arm/allwinner/aw_thermal.c +++ b/sys/arm/allwinner/aw_thermal.c @@ -1,724 +1,724 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner thermal sensor controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "cpufreq_if.h" #include "nvmem_if.h" #define THS_CTRL0 0x00 #define THS_CTRL1 0x04 #define ADC_CALI_EN (1 << 17) #define THS_CTRL2 0x40 #define SENSOR_ACQ1_SHIFT 16 #define SENSOR2_EN (1 << 2) #define SENSOR1_EN (1 << 1) #define SENSOR0_EN (1 << 0) #define THS_INTC 0x44 #define THS_THERMAL_PER_SHIFT 12 #define THS_INTS 0x48 #define THS2_DATA_IRQ_STS (1 << 10) #define THS1_DATA_IRQ_STS (1 << 9) #define THS0_DATA_IRQ_STS (1 << 8) #define SHUT_INT2_STS (1 << 6) #define SHUT_INT1_STS (1 << 5) #define SHUT_INT0_STS (1 << 4) #define ALARM_INT2_STS (1 << 2) #define ALARM_INT1_STS (1 << 1) #define ALARM_INT0_STS (1 << 0) #define THS_ALARM0_CTRL 0x50 #define ALARM_T_HOT_MASK 0xfff #define ALARM_T_HOT_SHIFT 16 #define ALARM_T_HYST_MASK 0xfff #define ALARM_T_HYST_SHIFT 0 #define THS_SHUTDOWN0_CTRL 0x60 #define SHUT_T_HOT_MASK 0xfff #define SHUT_T_HOT_SHIFT 16 #define THS_FILTER 0x70 #define THS_CALIB0 0x74 #define THS_CALIB1 0x78 #define THS_DATA0 0x80 #define THS_DATA1 0x84 #define THS_DATA2 0x88 #define DATA_MASK 0xfff #define 
A83T_CLK_RATE 24000000 #define A83T_ADC_ACQUIRE_TIME 23 /* 24Mhz/(23 + 1) = 1us */ #define A83T_THERMAL_PER 1 /* 4096 * (1 + 1) / 24Mhz = 341 us */ #define A83T_FILTER 0x5 /* Filter enabled, avg of 4 */ #define A83T_TEMP_BASE 2719000 #define A83T_TEMP_MUL 1000 #define A83T_TEMP_DIV 14186 #define A64_CLK_RATE 4000000 #define A64_ADC_ACQUIRE_TIME 400 /* 4Mhz/(400 + 1) = 100 us */ #define A64_THERMAL_PER 24 /* 4096 * (24 + 1) / 4Mhz = 25.6 ms */ #define A64_FILTER 0x6 /* Filter enabled, avg of 8 */ #define A64_TEMP_BASE 2170000 #define A64_TEMP_MUL 1000 #define A64_TEMP_DIV 8560 #define H3_CLK_RATE 4000000 #define H3_ADC_ACQUIRE_TIME 0x3f #define H3_THERMAL_PER 401 #define H3_FILTER 0x6 /* Filter enabled, avg of 8 */ #define H3_TEMP_BASE 217 #define H3_TEMP_MUL 1000 #define H3_TEMP_DIV 8253 #define H3_TEMP_MINUS 1794000 #define H3_INIT_ALARM 90 /* degC */ #define H3_INIT_SHUT 105 /* degC */ #define H5_CLK_RATE 24000000 #define H5_ADC_ACQUIRE_TIME 479 /* 24Mhz/479 = 20us */ #define H5_THERMAL_PER 58 /* 4096 * (58 + 1) / 24Mhz = 10ms */ #define H5_FILTER 0x6 /* Filter enabled, avg of 8 */ #define H5_TEMP_BASE 233832448 #define H5_TEMP_MUL 124885 #define H5_TEMP_DIV 20 #define H5_TEMP_BASE_CPU 271581184 #define H5_TEMP_MUL_CPU 152253 #define H5_TEMP_BASE_GPU 289406976 #define H5_TEMP_MUL_GPU 166724 #define H5_INIT_CPU_ALARM 80 /* degC */ #define H5_INIT_CPU_SHUT 96 /* degC */ #define H5_INIT_GPU_ALARM 84 /* degC */ #define H5_INIT_GPU_SHUT 100 /* degC */ #define TEMP_C_TO_K 273 #define SENSOR_ENABLE_ALL (SENSOR0_EN|SENSOR1_EN|SENSOR2_EN) #define SHUT_INT_ALL (SHUT_INT0_STS|SHUT_INT1_STS|SHUT_INT2_STS) #define ALARM_INT_ALL (ALARM_INT0_STS) #define MAX_SENSORS 3 #define MAX_CF_LEVELS 64 #define THROTTLE_ENABLE_DEFAULT 1 /* Enable thermal throttling */ static int aw_thermal_throttle_enable = THROTTLE_ENABLE_DEFAULT; TUNABLE_INT("hw.aw_thermal.throttle_enable", &aw_thermal_throttle_enable); struct aw_thermal_sensor { const char *name; const char *desc; int init_alarm; int 
init_shut; }; struct aw_thermal_config { struct aw_thermal_sensor sensors[MAX_SENSORS]; int nsensors; uint64_t clk_rate; uint32_t adc_acquire_time; int adc_cali_en; uint32_t filter; uint32_t thermal_per; int (*to_temp)(uint32_t, int); uint32_t (*to_reg)(int, int); int temp_base; int temp_mul; int temp_div; int calib0, calib1; uint32_t calib0_mask, calib1_mask; }; static int a83t_to_temp(uint32_t val, int sensor) { return ((A83T_TEMP_BASE - (val * A83T_TEMP_MUL)) / A83T_TEMP_DIV); } static const struct aw_thermal_config a83t_config = { .nsensors = 3, .sensors = { [0] = { .name = "cluster0", .desc = "CPU cluster 0 temperature", }, [1] = { .name = "cluster1", .desc = "CPU cluster 1 temperature", }, [2] = { .name = "gpu", .desc = "GPU temperature", }, }, .clk_rate = A83T_CLK_RATE, .adc_acquire_time = A83T_ADC_ACQUIRE_TIME, .adc_cali_en = 1, .filter = A83T_FILTER, .thermal_per = A83T_THERMAL_PER, .to_temp = a83t_to_temp, .calib0_mask = 0xffffffff, .calib1_mask = 0xffff, }; static int a64_to_temp(uint32_t val, int sensor) { return ((A64_TEMP_BASE - (val * A64_TEMP_MUL)) / A64_TEMP_DIV); } static const struct aw_thermal_config a64_config = { .nsensors = 3, .sensors = { [0] = { .name = "cpu", .desc = "CPU temperature", }, [1] = { .name = "gpu1", .desc = "GPU temperature 1", }, [2] = { .name = "gpu2", .desc = "GPU temperature 2", }, }, .clk_rate = A64_CLK_RATE, .adc_acquire_time = A64_ADC_ACQUIRE_TIME, .adc_cali_en = 1, .filter = A64_FILTER, .thermal_per = A64_THERMAL_PER, .to_temp = a64_to_temp, .calib0_mask = 0xffffffff, .calib1_mask = 0xffff, }; static int h3_to_temp(uint32_t val, int sensor) { return (H3_TEMP_BASE - ((val * H3_TEMP_MUL) / H3_TEMP_DIV)); } static uint32_t h3_to_reg(int val, int sensor) { return ((H3_TEMP_MINUS - (val * H3_TEMP_DIV)) / H3_TEMP_MUL); } static const struct aw_thermal_config h3_config = { .nsensors = 1, .sensors = { [0] = { .name = "cpu", .desc = "CPU temperature", .init_alarm = H3_INIT_ALARM, .init_shut = H3_INIT_SHUT, }, }, .clk_rate = 
H3_CLK_RATE, .adc_acquire_time = H3_ADC_ACQUIRE_TIME, .adc_cali_en = 1, .filter = H3_FILTER, .thermal_per = H3_THERMAL_PER, .to_temp = h3_to_temp, .to_reg = h3_to_reg, .calib0_mask = 0xffffffff, }; static int h5_to_temp(uint32_t val, int sensor) { int tmp; /* Temp is lower than 70 degrees */ if (val > 0x500) { tmp = H5_TEMP_BASE - (val * H5_TEMP_MUL); tmp >>= H5_TEMP_DIV; return (tmp); } if (sensor == 0) tmp = H5_TEMP_BASE_CPU - (val * H5_TEMP_MUL_CPU); else if (sensor == 1) tmp = H5_TEMP_BASE_GPU - (val * H5_TEMP_MUL_GPU); else { printf("Unknown sensor %d\n", sensor); return (val); } tmp >>= H5_TEMP_DIV; return (tmp); } static uint32_t h5_to_reg(int val, int sensor) { int tmp; if (val < 70) { tmp = H5_TEMP_BASE - (val << H5_TEMP_DIV); tmp /= H5_TEMP_MUL; } else { if (sensor == 0) { tmp = H5_TEMP_BASE_CPU - (val << H5_TEMP_DIV); tmp /= H5_TEMP_MUL_CPU; } else if (sensor == 1) { tmp = H5_TEMP_BASE_GPU - (val << H5_TEMP_DIV); tmp /= H5_TEMP_MUL_GPU; } else { printf("Unknown sensor %d\n", sensor); return (val); } } return ((uint32_t)tmp); } static const struct aw_thermal_config h5_config = { .nsensors = 2, .sensors = { [0] = { .name = "cpu", .desc = "CPU temperature", .init_alarm = H5_INIT_CPU_ALARM, .init_shut = H5_INIT_CPU_SHUT, }, [1] = { .name = "gpu", .desc = "GPU temperature", .init_alarm = H5_INIT_GPU_ALARM, .init_shut = H5_INIT_GPU_SHUT, }, }, .clk_rate = H5_CLK_RATE, .adc_acquire_time = H5_ADC_ACQUIRE_TIME, .filter = H5_FILTER, .thermal_per = H5_THERMAL_PER, .to_temp = h5_to_temp, .to_reg = h5_to_reg, .calib0_mask = 0xffffffff, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-a83t-ths", (uintptr_t)&a83t_config }, { "allwinner,sun8i-h3-ths", (uintptr_t)&h3_config }, { "allwinner,sun50i-a64-ths", (uintptr_t)&a64_config }, { "allwinner,sun50i-h5-ths", (uintptr_t)&h5_config }, { NULL, (uintptr_t)NULL } }; #define THS_CONF(d) \ (void *)ofw_bus_search_compatible((d), compat_data)->ocd_data struct aw_thermal_softc { device_t dev; struct resource 
*res[2]; struct aw_thermal_config *conf; struct task cf_task; int throttle; int min_freq; struct cf_level levels[MAX_CF_LEVELS]; eventhandler_tag cf_pre_tag; clk_t clk_apb; clk_t clk_ths; }; static struct resource_spec aw_thermal_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define RD4(sc, reg) bus_read_4((sc)->res[0], (reg)) #define WR4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int aw_thermal_init(struct aw_thermal_softc *sc) { phandle_t node; uint32_t calib[2]; int error; node = ofw_bus_get_node(sc->dev); if (nvmem_get_cell_len(node, "calibration") > sizeof(calib)) { device_printf(sc->dev, "calibration nvmem cell is too large\n"); return (ENXIO); } error = nvmem_read_cell_by_name(node, "calibration", (void *)&calib, nvmem_get_cell_len(node, "calibration")); /* Read calibration settings from EFUSE */ if (error != 0) { device_printf(sc->dev, "Cannot read THS efuse\n"); return (error); } calib[0] &= sc->conf->calib0_mask; calib[1] &= sc->conf->calib1_mask; /* Write calibration settings to thermal controller */ if (calib[0] != 0) WR4(sc, THS_CALIB0, calib[0]); if (calib[1] != 0) WR4(sc, THS_CALIB1, calib[1]); /* Configure ADC acquire time (CLK_IN/(N+1)) and enable sensors */ WR4(sc, THS_CTRL1, ADC_CALI_EN); WR4(sc, THS_CTRL0, sc->conf->adc_acquire_time); WR4(sc, THS_CTRL2, sc->conf->adc_acquire_time << SENSOR_ACQ1_SHIFT); /* Set thermal period */ WR4(sc, THS_INTC, sc->conf->thermal_per << THS_THERMAL_PER_SHIFT); /* Enable average filter */ WR4(sc, THS_FILTER, sc->conf->filter); /* Enable interrupts */ WR4(sc, THS_INTS, RD4(sc, THS_INTS)); WR4(sc, THS_INTC, RD4(sc, THS_INTC) | SHUT_INT_ALL | ALARM_INT_ALL); /* Enable sensors */ WR4(sc, THS_CTRL2, RD4(sc, THS_CTRL2) | SENSOR_ENABLE_ALL); return (0); } static int aw_thermal_gettemp(struct aw_thermal_softc *sc, int sensor) { uint32_t val; val = RD4(sc, THS_DATA0 + (sensor * 4)); return (sc->conf->to_temp(val, sensor)); } static int aw_thermal_getshut(struct 
aw_thermal_softc *sc, int sensor) { uint32_t val; val = RD4(sc, THS_SHUTDOWN0_CTRL + (sensor * 4)); val = (val >> SHUT_T_HOT_SHIFT) & SHUT_T_HOT_MASK; return (sc->conf->to_temp(val, sensor)); } static void aw_thermal_setshut(struct aw_thermal_softc *sc, int sensor, int temp) { uint32_t val; val = RD4(sc, THS_SHUTDOWN0_CTRL + (sensor * 4)); val &= ~(SHUT_T_HOT_MASK << SHUT_T_HOT_SHIFT); val |= (sc->conf->to_reg(temp, sensor) << SHUT_T_HOT_SHIFT); WR4(sc, THS_SHUTDOWN0_CTRL + (sensor * 4), val); } static int aw_thermal_gethyst(struct aw_thermal_softc *sc, int sensor) { uint32_t val; val = RD4(sc, THS_ALARM0_CTRL + (sensor * 4)); val = (val >> ALARM_T_HYST_SHIFT) & ALARM_T_HYST_MASK; return (sc->conf->to_temp(val, sensor)); } static int aw_thermal_getalarm(struct aw_thermal_softc *sc, int sensor) { uint32_t val; val = RD4(sc, THS_ALARM0_CTRL + (sensor * 4)); val = (val >> ALARM_T_HOT_SHIFT) & ALARM_T_HOT_MASK; return (sc->conf->to_temp(val, sensor)); } static void aw_thermal_setalarm(struct aw_thermal_softc *sc, int sensor, int temp) { uint32_t val; val = RD4(sc, THS_ALARM0_CTRL + (sensor * 4)); val &= ~(ALARM_T_HOT_MASK << ALARM_T_HOT_SHIFT); val |= (sc->conf->to_reg(temp, sensor) << ALARM_T_HOT_SHIFT); WR4(sc, THS_ALARM0_CTRL + (sensor * 4), val); } static int aw_thermal_sysctl(SYSCTL_HANDLER_ARGS) { struct aw_thermal_softc *sc; int sensor, val; sc = arg1; sensor = arg2; val = aw_thermal_gettemp(sc, sensor) + TEMP_C_TO_K; return sysctl_handle_opaque(oidp, &val, sizeof(val), req); } static void aw_thermal_throttle(struct aw_thermal_softc *sc, int enable) { device_t cf_dev; int count, error; if (enable == sc->throttle) return; if (enable != 0) { /* Set the lowest available frequency */ cf_dev = devclass_get_device(devclass_find("cpufreq"), 0); if (cf_dev == NULL) return; count = MAX_CF_LEVELS; error = CPUFREQ_LEVELS(cf_dev, sc->levels, &count); if (error != 0 || count == 0) return; sc->min_freq = sc->levels[count - 1].total_set.freq; error = CPUFREQ_SET(cf_dev, 
&sc->levels[count - 1], CPUFREQ_PRIO_USER); if (error != 0) return; } sc->throttle = enable; } static void aw_thermal_cf_task(void *arg, int pending) { struct aw_thermal_softc *sc; sc = arg; aw_thermal_throttle(sc, 1); } static void aw_thermal_cf_pre_change(void *arg, const struct cf_level *level, int *status) { struct aw_thermal_softc *sc; int temp_cur, temp_alarm; sc = arg; if (aw_thermal_throttle_enable == 0 || sc->throttle == 0 || level->total_set.freq == sc->min_freq) return; temp_cur = aw_thermal_gettemp(sc, 0); temp_alarm = aw_thermal_getalarm(sc, 0); if (temp_cur < temp_alarm) aw_thermal_throttle(sc, 0); else *status = ENXIO; } static void aw_thermal_intr(void *arg) { struct aw_thermal_softc *sc; device_t dev; uint32_t ints; dev = arg; sc = device_get_softc(dev); ints = RD4(sc, THS_INTS); WR4(sc, THS_INTS, ints); if ((ints & SHUT_INT_ALL) != 0) { device_printf(dev, "WARNING - current temperature exceeds safe limits\n"); shutdown_nice(RB_POWEROFF); } if ((ints & ALARM_INT_ALL) != 0) taskqueue_enqueue(taskqueue_thread, &sc->cf_task); } static int aw_thermal_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (THS_CONF(dev) == NULL) return (ENXIO); device_set_desc(dev, "Allwinner Thermal Sensor Controller"); return (BUS_PROBE_DEFAULT); } static int aw_thermal_attach(device_t dev) { struct aw_thermal_softc *sc; hwreset_t rst; int i, error; void *ih; sc = device_get_softc(dev); sc->dev = dev; rst = NULL; ih = NULL; sc->conf = THS_CONF(dev); TASK_INIT(&sc->cf_task, 0, aw_thermal_cf_task, sc); if (bus_alloc_resources(dev, aw_thermal_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "bus", &sc->clk_apb) == 0) { error = clk_enable(sc->clk_apb); if (error != 0) { device_printf(dev, "cannot enable apb clock\n"); goto fail; } } if (clk_get_by_ofw_name(dev, 0, "mod", &sc->clk_ths) == 0) { error = clk_set_freq(sc->clk_ths, sc->conf->clk_rate, 0); if (error != 0) { 
device_printf(dev, "cannot set ths clock rate\n"); goto fail; } error = clk_enable(sc->clk_ths); if (error != 0) { device_printf(dev, "cannot enable ths clock\n"); goto fail; } } if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_thermal_intr, dev, &ih); if (error != 0) { device_printf(dev, "cannot setup interrupt handler\n"); goto fail; } for (i = 0; i < sc->conf->nsensors; i++) { if (sc->conf->sensors[i].init_alarm > 0) aw_thermal_setalarm(sc, i, sc->conf->sensors[i].init_alarm); if (sc->conf->sensors[i].init_shut > 0) aw_thermal_setshut(sc, i, sc->conf->sensors[i].init_shut); } if (aw_thermal_init(sc) != 0) goto fail; for (i = 0; i < sc->conf->nsensors; i++) SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, sc->conf->sensors[i].name, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, aw_thermal_sysctl, "IK0", sc->conf->sensors[i].desc); if (bootverbose) for (i = 0; i < sc->conf->nsensors; i++) { device_printf(dev, "%s: alarm %dC hyst %dC shut %dC\n", sc->conf->sensors[i].name, aw_thermal_getalarm(sc, i), aw_thermal_gethyst(sc, i), aw_thermal_getshut(sc, i)); } sc->cf_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change, aw_thermal_cf_pre_change, sc, EVENTHANDLER_PRI_FIRST); return (0); fail: if (ih != NULL) bus_teardown_intr(dev, sc->res[1], ih); if (rst != NULL) hwreset_release(rst); if (sc->clk_apb != NULL) clk_release(sc->clk_apb); if (sc->clk_ths != NULL) clk_release(sc->clk_ths); bus_release_resources(dev, aw_thermal_spec, sc->res); return (ENXIO); } static device_method_t aw_thermal_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_thermal_probe), DEVMETHOD(device_attach, aw_thermal_attach), DEVMETHOD_END }; static driver_t aw_thermal_driver = { "aw_thermal", aw_thermal_methods, sizeof(struct 
aw_thermal_softc), }; DRIVER_MODULE(aw_thermal, simplebus, aw_thermal_driver, 0, 0); MODULE_VERSION(aw_thermal, 1); MODULE_DEPEND(aw_thermal, aw_sid, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/aw_usb3phy.c b/sys/arm/allwinner/aw_usb3phy.c index e2bf9c068538..058fce6061a7 100644 --- a/sys/arm/allwinner/aw_usb3phy.c +++ b/sys/arm/allwinner/aw_usb3phy.c @@ -1,294 +1,294 @@ /* $NetBSD: sunxi_usb3phy.c,v 1.1 2018/05/01 23:59:42 jmcneill Exp $ */ /*- * Copyright (c) 2018 Jared McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Allwinner USB3PHY */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "phynode_if.h" #define USB3PHY_APP 0x00 #define APP_FORCE_VBUS (0x3 << 12) #define USB3PHY_PIPE_CLOCK_CONTROL 0x14 #define PCC_PIPE_CLK_OPEN (1 << 6) #define USB3PHY_PHY_TUNE_LOW 0x18 #define PTL_MAGIC 0x0047fc87 #define USB3PHY_PHY_TUNE_HIGH 0x1c #define PTH_TX_DEEMPH_3P5DB (0x1F << 19) #define PTH_TX_DEEMPH_6DB (0x3F << 13) #define PTH_TX_SWING_FULL (0x7F << 6) #define PTH_LOS_BIAS (0x7 << 3) #define PTH_TX_BOOST_LVL (0x7 << 0) #define USB3PHY_PHY_EXTERNAL_CONTROL 0x20 #define PEC_REF_SSP_EN (1 << 26) #define PEC_SSC_EN (1 << 24) #define PEC_EXTERN_VBUS (0x3 << 1) #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) static struct ofw_compat_data compat_data[] = { { "allwinner,sun50i-h6-usb3-phy", 1 }, { NULL, 0 } }; static struct resource_spec aw_usb3phy_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct awusb3phy_softc { struct resource * res; regulator_t reg; int mode; }; /* Phy class and methods. 
*/ static int awusb3phy_phy_enable(struct phynode *phy, bool enable); static int awusb3phy_get_mode(struct phynode *phy, int *mode); static int awusb3phy_set_mode(struct phynode *phy, int mode); static phynode_usb_method_t awusb3phy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, awusb3phy_phy_enable), PHYNODEMETHOD(phynode_usb_get_mode, awusb3phy_get_mode), PHYNODEMETHOD(phynode_usb_set_mode, awusb3phy_set_mode), PHYNODEMETHOD_END }; DEFINE_CLASS_1(awusb3phy_phynode, awusb3phy_phynode_class, awusb3phy_phynode_methods, sizeof(struct phynode_usb_sc), phynode_usb_class); #define RD4(res, o) bus_read_4(res, (o)) #define WR4(res, o, v) bus_write_4(res, (o), (v)) static int awusb3phy_phy_enable(struct phynode *phynode, bool enable) { struct awusb3phy_softc *sc; device_t dev; uint32_t val; int error = 0; dev = phynode_get_device(phynode); sc = device_get_softc(dev); device_printf(dev, "%s: called\n", __func__); if (enable) { val = RD4(sc->res, USB3PHY_PHY_EXTERNAL_CONTROL); device_printf(dev, "EXTERNAL_CONTROL: %x\n", val); val |= PEC_EXTERN_VBUS; val |= PEC_SSC_EN; val |= PEC_REF_SSP_EN; device_printf(dev, "EXTERNAL_CONTROL: %x\n", val); WR4(sc->res, USB3PHY_PHY_EXTERNAL_CONTROL, val); val = RD4(sc->res, USB3PHY_PIPE_CLOCK_CONTROL); device_printf(dev, "PIPE_CONTROL: %x\n", val); val |= PCC_PIPE_CLK_OPEN; device_printf(dev, "PIPE_CONTROL: %x\n", val); WR4(sc->res, USB3PHY_PIPE_CLOCK_CONTROL, val); val = RD4(sc->res, USB3PHY_APP); device_printf(dev, "APP: %x\n", val); val |= APP_FORCE_VBUS; device_printf(dev, "APP: %x\n", val); WR4(sc->res, USB3PHY_APP, val); WR4(sc->res, USB3PHY_PHY_TUNE_LOW, PTL_MAGIC); val = RD4(sc->res, USB3PHY_PHY_TUNE_HIGH); device_printf(dev, "PHY_TUNE_HIGH: %x\n", val); val |= PTH_TX_BOOST_LVL; val |= PTH_LOS_BIAS; val &= ~PTH_TX_SWING_FULL; val |= __SHIFTIN(0x55, PTH_TX_SWING_FULL); val &= ~PTH_TX_DEEMPH_6DB; val |= __SHIFTIN(0x20, PTH_TX_DEEMPH_6DB); val &= ~PTH_TX_DEEMPH_3P5DB; val |= __SHIFTIN(0x15, PTH_TX_DEEMPH_3P5DB); device_printf(dev, 
"PHY_TUNE_HIGH: %x\n", val); WR4(sc->res, USB3PHY_PHY_TUNE_HIGH, val); if (sc->reg) error = regulator_enable(sc->reg); } else { if (sc->reg) error = regulator_disable(sc->reg); } if (error != 0) { device_printf(dev, "couldn't %s regulator for phy\n", enable ? "enable" : "disable"); return (error); } return (0); } static int awusb3phy_get_mode(struct phynode *phynode, int *mode) { struct awusb3phy_softc *sc; device_t dev; dev = phynode_get_device(phynode); sc = device_get_softc(dev); *mode = sc->mode; return (0); } static int awusb3phy_set_mode(struct phynode *phynode, int mode) { device_t dev; struct awusb3phy_softc *sc; dev = phynode_get_device(phynode); sc = device_get_softc(dev); if (mode != PHY_USB_MODE_HOST) return (EINVAL); sc->mode = mode; return (0); } static int awusb3phy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner USB3PHY"); return (BUS_PROBE_DEFAULT); } static int awusb3phy_attach(device_t dev) { struct phynode *phynode; struct phynode_init_def phy_init; struct awusb3phy_softc *sc; clk_t clk; hwreset_t rst; phandle_t node; int error, i; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); if (bus_alloc_resources(dev, aw_usb3phy_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } /* Enable clocks */ for (i = 0; clk_get_by_ofw_index(dev, 0, i, &clk) == 0; i++) { error = clk_enable(clk); if (error != 0) { device_printf(dev, "couldn't enable clock %s\n", clk_get_name(clk)); return (error); } } /* De-assert resets */ for (i = 0; hwreset_get_by_ofw_idx(dev, 0, i, &rst) == 0; i++) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "couldn't de-assert reset %d\n", i); return (error); } } /* Get regulators */ regulator_get_by_ofw_property(dev, node, "phy-supply", &sc->reg); /* Create the phy */ phy_init.ofw_node = ofw_bus_get_node(dev); phynode = 
phynode_create(dev, &awusb3phy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "failed to create USB PHY\n"); return (ENXIO); } if (phynode_register(phynode) == NULL) { device_printf(dev, "failed to create USB PHY\n"); return (ENXIO); } return (error); } static device_method_t awusb3phy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, awusb3phy_probe), DEVMETHOD(device_attach, awusb3phy_attach), DEVMETHOD_END }; static driver_t awusb3phy_driver = { "awusb3phy", awusb3phy_methods, sizeof(struct awusb3phy_softc) }; /* aw_usb3phy needs to come up after regulators/gpio/etc, but before ehci/ohci */ EARLY_DRIVER_MODULE(awusb3phy, simplebus, awusb3phy_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(awusb3phy, 1); diff --git a/sys/arm/allwinner/aw_usbphy.c b/sys/arm/allwinner/aw_usbphy.c index 4125a13b36c2..33c11e62ef7c 100644 --- a/sys/arm/allwinner/aw_usbphy.c +++ b/sys/arm/allwinner/aw_usbphy.c @@ -1,524 +1,524 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner USB PHY */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "phynode_if.h" enum awusbphy_type { AWUSBPHY_TYPE_A10 = 1, AWUSBPHY_TYPE_A13, AWUSBPHY_TYPE_A20, AWUSBPHY_TYPE_A31, AWUSBPHY_TYPE_H3, AWUSBPHY_TYPE_A64, AWUSBPHY_TYPE_A83T, AWUSBPHY_TYPE_H6, }; struct aw_usbphy_conf { int num_phys; enum awusbphy_type phy_type; bool pmu_unk1; bool phy0_route; }; static const struct aw_usbphy_conf a10_usbphy_conf = { .num_phys = 3, .phy_type = AWUSBPHY_TYPE_A10, .pmu_unk1 = false, .phy0_route = false, }; static const struct aw_usbphy_conf a13_usbphy_conf = { .num_phys = 2, .phy_type = AWUSBPHY_TYPE_A13, .pmu_unk1 = false, .phy0_route = false, }; static const struct aw_usbphy_conf a20_usbphy_conf = { .num_phys = 3, .phy_type = AWUSBPHY_TYPE_A20, .pmu_unk1 = false, .phy0_route = false, }; static const struct aw_usbphy_conf a31_usbphy_conf = { .num_phys = 3, .phy_type = AWUSBPHY_TYPE_A31, .pmu_unk1 = false, .phy0_route = false, }; static const struct aw_usbphy_conf h3_usbphy_conf = { .num_phys = 4, .phy_type = AWUSBPHY_TYPE_H3, .pmu_unk1 = true, .phy0_route = true, }; static const struct aw_usbphy_conf a64_usbphy_conf = { .num_phys = 2, .phy_type = AWUSBPHY_TYPE_A64, .pmu_unk1 = true, .phy0_route = true, }; static const struct aw_usbphy_conf a83t_usbphy_conf = { .num_phys = 3, .phy_type = AWUSBPHY_TYPE_A83T, .pmu_unk1 = false, .phy0_route = false, }; static 
const struct aw_usbphy_conf h6_usbphy_conf = { .num_phys = 4, .phy_type = AWUSBPHY_TYPE_H6, .pmu_unk1 = false, .phy0_route = true, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun4i-a10-usb-phy", (uintptr_t)&a10_usbphy_conf }, { "allwinner,sun5i-a13-usb-phy", (uintptr_t)&a13_usbphy_conf }, { "allwinner,sun6i-a31-usb-phy", (uintptr_t)&a31_usbphy_conf }, { "allwinner,sun7i-a20-usb-phy", (uintptr_t)&a20_usbphy_conf }, { "allwinner,sun8i-h3-usb-phy", (uintptr_t)&h3_usbphy_conf }, { "allwinner,sun50i-a64-usb-phy", (uintptr_t)&a64_usbphy_conf }, { "allwinner,sun8i-a83t-usb-phy", (uintptr_t)&a83t_usbphy_conf }, { "allwinner,sun50i-h6-usb-phy", (uintptr_t)&h6_usbphy_conf }, { NULL, 0 } }; struct awusbphy_softc { struct resource * phy_ctrl; struct resource ** pmu; regulator_t * reg; gpio_pin_t id_det_pin; int id_det_valid; gpio_pin_t vbus_det_pin; int vbus_det_valid; struct aw_usbphy_conf *phy_conf; int mode; }; /* Phy class and methods. */ static int awusbphy_phy_enable(struct phynode *phy, bool enable); static int awusbphy_get_mode(struct phynode *phy, int *mode); static int awusbphy_set_mode(struct phynode *phy, int mode); static phynode_usb_method_t awusbphy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, awusbphy_phy_enable), PHYNODEMETHOD(phynode_usb_get_mode, awusbphy_get_mode), PHYNODEMETHOD(phynode_usb_set_mode, awusbphy_set_mode), PHYNODEMETHOD_END }; DEFINE_CLASS_1(awusbphy_phynode, awusbphy_phynode_class, awusbphy_phynode_methods, sizeof(struct phynode_usb_sc), phynode_usb_class); #define RD4(res, o) bus_read_4(res, (o)) #define WR4(res, o, v) bus_write_4(res, (o), (v)) #define CLR4(res, o, m) WR4(res, o, RD4(res, o) & ~(m)) #define SET4(res, o, m) WR4(res, o, RD4(res, o) | (m)) #define PHY_CSR 0x00 #define ID_PULLUP_EN (1 << 17) #define DPDM_PULLUP_EN (1 << 16) #define FORCE_ID (0x3 << 14) #define FORCE_ID_SHIFT 14 #define FORCE_ID_LOW 2 #define FORCE_ID_HIGH 3 #define FORCE_VBUS_VALID (0x3 << 12) #define FORCE_VBUS_VALID_SHIFT 12 #define 
FORCE_VBUS_VALID_LOW 2 #define FORCE_VBUS_VALID_HIGH 3 #define VBUS_CHANGE_DET (1 << 6) #define ID_CHANGE_DET (1 << 5) #define DPDM_CHANGE_DET (1 << 4) #define OTG_PHY_CFG 0x20 #define OTG_PHY_ROUTE_OTG (1 << 0) #define PMU_IRQ_ENABLE 0x00 #define PMU_AHB_INCR8 (1 << 10) #define PMU_AHB_INCR4 (1 << 9) #define PMU_AHB_INCRX_ALIGN (1 << 8) #define PMU_ULPI_BYPASS (1 << 0) #define PMU_UNK_H3 0x10 #define PMU_UNK_H3_CLR 0x2 static void awusbphy_configure(device_t dev, int phyno) { struct awusbphy_softc *sc; sc = device_get_softc(dev); if (sc->pmu[phyno] == NULL) return; if (sc->phy_conf->pmu_unk1 == true) CLR4(sc->pmu[phyno], PMU_UNK_H3, PMU_UNK_H3_CLR); SET4(sc->pmu[phyno], PMU_IRQ_ENABLE, PMU_ULPI_BYPASS | PMU_AHB_INCR8 | PMU_AHB_INCR4 | PMU_AHB_INCRX_ALIGN); } static int awusbphy_init(device_t dev) { struct awusbphy_softc *sc; phandle_t node; char pname[20]; uint32_t val; int error, off, rid; regulator_t reg; hwreset_t rst; clk_t clk; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->phy_conf = (struct aw_usbphy_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Get phy_ctrl region */ if (ofw_bus_find_string_index(node, "reg-names", "phy_ctrl", &rid) != 0) { device_printf(dev, "Cannot locate phy control resource\n"); return (ENXIO); } sc->phy_ctrl = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->phy_ctrl == NULL) { device_printf(dev, "Cannot allocate resource\n"); return (ENXIO); } /* Enable clocks */ for (off = 0; clk_get_by_ofw_index(dev, 0, off, &clk) == 0; off++) { error = clk_enable(clk); if (error != 0) { device_printf(dev, "couldn't enable clock %s\n", clk_get_name(clk)); return (error); } } /* De-assert resets */ for (off = 0; hwreset_get_by_ofw_idx(dev, 0, off, &rst) == 0; off++) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "couldn't de-assert reset %d\n", off); return (error); } } /* Get GPIOs */ error = gpio_pin_get_by_ofw_property(dev, node, "usb0_id_det-gpios", &sc->id_det_pin); 
if (error == 0) sc->id_det_valid = 1; error = gpio_pin_get_by_ofw_property(dev, node, "usb0_vbus_det-gpios", &sc->vbus_det_pin); if (error == 0) sc->vbus_det_valid = 1; sc->reg = malloc(sizeof(*(sc->reg)) * sc->phy_conf->num_phys, M_DEVBUF, M_WAITOK | M_ZERO); sc->pmu = malloc(sizeof(*(sc->pmu)) * sc->phy_conf->num_phys, M_DEVBUF, M_WAITOK | M_ZERO); /* Get regulators */ for (off = 0; off < sc->phy_conf->num_phys; off++) { snprintf(pname, sizeof(pname), "usb%d_vbus-supply", off); if (regulator_get_by_ofw_property(dev, 0, pname, ®) == 0) sc->reg[off] = reg; snprintf(pname, sizeof(pname), "pmu%d", off); if (ofw_bus_find_string_index(node, "reg-names", pname, &rid) != 0) continue; sc->pmu[off] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->pmu[off] == NULL) { device_printf(dev, "Cannot allocate resource\n"); return (ENXIO); } } /* Enable OTG PHY for host mode */ val = bus_read_4(sc->phy_ctrl, PHY_CSR); val &= ~(VBUS_CHANGE_DET | ID_CHANGE_DET | DPDM_CHANGE_DET); val |= (ID_PULLUP_EN | DPDM_PULLUP_EN); val &= ~FORCE_ID; val |= (FORCE_ID_LOW << FORCE_ID_SHIFT); val &= ~FORCE_VBUS_VALID; val |= (FORCE_VBUS_VALID_HIGH << FORCE_VBUS_VALID_SHIFT); bus_write_4(sc->phy_ctrl, PHY_CSR, val); return (0); } static int awusbphy_vbus_detect(device_t dev, int *val) { struct awusbphy_softc *sc; bool active; int error; sc = device_get_softc(dev); if (sc->vbus_det_valid) { error = gpio_pin_is_active(sc->vbus_det_pin, &active); if (error != 0) { device_printf(dev, "Cannot get status of id pin %d\n", error); return (error); } *val = active; return (0); } /* TODO check vbus_power-supply. */ /* * If there is no way to detect, assume present. 
*/ *val = 1; return (0); } static int awusbphy_phy_enable(struct phynode *phynode, bool enable) { device_t dev; intptr_t phy; struct awusbphy_softc *sc; regulator_t reg; int error, vbus_det; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy < 0 || phy >= sc->phy_conf->num_phys) return (ERANGE); /* Configure PHY */ awusbphy_configure(dev, phy); /* Regulators are optional. If not found, return success. */ reg = sc->reg[phy]; if (reg == NULL) return (0); if (phy == 0) { /* If an external vbus is detected, do not enable phy 0 */ error = awusbphy_vbus_detect(dev, &vbus_det); if (error) goto out; /* TODO check vbus_power-supply as well. */ if (sc->vbus_det_valid && vbus_det == 1) { if (bootverbose) device_printf(dev, "External VBUS detected, " "not enabling the regulator\n"); return (0); } } if (enable) { /* Depending on the PHY we need to route OTG to OHCI/EHCI */ error = regulator_enable(reg); } else error = regulator_disable(reg); out: if (error != 0) { device_printf(dev, "couldn't %s regulator for phy %jd\n", enable ? 
"enable" : "disable", (intmax_t)phy); return (error); } return (0); } static int awusbphy_get_mode(struct phynode *phynode, int *mode) { struct awusbphy_softc *sc; device_t dev; dev = phynode_get_device(phynode); sc = device_get_softc(dev); *mode = sc->mode; return (0); } static int awusbphy_set_mode(struct phynode *phynode, int mode) { device_t dev; intptr_t phy; struct awusbphy_softc *sc; uint32_t val; int error, vbus_det; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != 0) { if (mode != PHY_USB_MODE_HOST) return (EINVAL); return (0); } if (sc->mode == mode) return (0); if (mode == PHY_USB_MODE_OTG) /* TODO */ return (EOPNOTSUPP); error = awusbphy_vbus_detect(dev, &vbus_det); if (error != 0) return (error); val = bus_read_4(sc->phy_ctrl, PHY_CSR); val &= ~(VBUS_CHANGE_DET | ID_CHANGE_DET | DPDM_CHANGE_DET); val |= (ID_PULLUP_EN | DPDM_PULLUP_EN); val &= ~FORCE_VBUS_VALID; val |= (vbus_det ? FORCE_VBUS_VALID_HIGH : FORCE_VBUS_VALID_LOW) << FORCE_VBUS_VALID_SHIFT; val &= ~FORCE_ID; switch (mode) { case PHY_USB_MODE_HOST: val |= (FORCE_ID_LOW << FORCE_ID_SHIFT); if (sc->phy_conf->phy0_route) CLR4(sc->phy_ctrl, OTG_PHY_CFG, OTG_PHY_ROUTE_OTG); break; case PHY_USB_MODE_DEVICE: val |= (FORCE_ID_HIGH << FORCE_ID_SHIFT); if (sc->phy_conf->phy0_route) SET4(sc->phy_ctrl, OTG_PHY_CFG, OTG_PHY_ROUTE_OTG); break; default: return (EINVAL); } bus_write_4(sc->phy_ctrl, PHY_CSR, val); sc->mode = mode; return (0); } static int awusbphy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner USB PHY"); return (BUS_PROBE_DEFAULT); } static int awusbphy_attach(device_t dev) { int error; struct phynode *phynode; struct phynode_init_def phy_init; struct awusbphy_softc *sc; int i; sc = device_get_softc(dev); error = awusbphy_init(dev); if (error) { device_printf(dev, "failed to initialize USB PHY, error 
%d\n", error); return (error); } /* Create and register phys. */ for (i = 0; i < sc->phy_conf->num_phys; i++) { bzero(&phy_init, sizeof(phy_init)); phy_init.id = i; phy_init.ofw_node = ofw_bus_get_node(dev); phynode = phynode_create(dev, &awusbphy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "failed to create USB PHY\n"); return (ENXIO); } if (phynode_register(phynode) == NULL) { device_printf(dev, "failed to create USB PHY\n"); return (ENXIO); } } return (error); } static device_method_t awusbphy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, awusbphy_probe), DEVMETHOD(device_attach, awusbphy_attach), DEVMETHOD_END }; static driver_t awusbphy_driver = { "awusbphy", awusbphy_methods, sizeof(struct awusbphy_softc) }; /* aw_usbphy needs to come up after regulators/gpio/etc, but before ehci/ohci */ EARLY_DRIVER_MODULE(awusbphy, simplebus, awusbphy_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(awusbphy, 1); diff --git a/sys/arm/allwinner/if_awg.c b/sys/arm/allwinner/if_awg.c index acc99083ad93..516cbefc6272 100644 --- a/sys/arm/allwinner/if_awg.c +++ b/sys/arm/allwinner/if_awg.c @@ -1,2018 +1,2018 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner Gigabit Ethernet MAC (EMAC) controller */ #include "opt_device_polling.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "syscon_if.h" #include "miibus_if.h" #include "gpio_if.h" #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) #define DESC_ALIGN 4 #define TX_DESC_COUNT 1024 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) #define RX_DESC_COUNT 256 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) #define TX_MAX_SEGS 20 #define SOFT_RST_RETRY 1000 #define MII_BUSY_RETRY 1000 #define MDIO_FREQ 2500000 #define BURST_LEN_DEFAULT 8 #define RX_TX_PRI_DEFAULT 0 #define PAUSE_TIME_DEFAULT 0x400 #define TX_INTERVAL_DEFAULT 64 #define RX_BATCH_DEFAULT 64 /* syscon EMAC clock register */ 
#define EMAC_CLK_REG 0x30 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ #define EMAC_CLK_EPHY_ADDR_SHIFT 20 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ #define EMAC_CLK_RMII_EN (1 << 13) #define EMAC_CLK_ETXDC (0x7 << 10) #define EMAC_CLK_ETXDC_SHIFT 10 #define EMAC_CLK_ERXDC (0x1f << 5) #define EMAC_CLK_ERXDC_SHIFT 5 #define EMAC_CLK_PIT (0x1 << 2) #define EMAC_CLK_PIT_MII (0 << 2) #define EMAC_CLK_PIT_RGMII (1 << 2) #define EMAC_CLK_SRC (0x3 << 0) #define EMAC_CLK_SRC_MII (0 << 0) #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) #define EMAC_CLK_SRC_RGMII (2 << 0) /* Burst length of RX and TX DMA transfers */ static int awg_burst_len = BURST_LEN_DEFAULT; TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */ static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); /* Pause time field in the transmitted control frame */ static int awg_pause_time = PAUSE_TIME_DEFAULT; TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); /* Request a TX interrupt every descriptors */ static int awg_tx_interval = TX_INTERVAL_DEFAULT; TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); /* Maximum number of mbufs to send to if_input */ static int awg_rx_batch = RX_BATCH_DEFAULT; TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); enum awg_type { EMAC_A83T = 1, EMAC_H3, EMAC_A64, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-a83t-emac", EMAC_A83T }, { "allwinner,sun8i-h3-emac", EMAC_H3 }, { "allwinner,sun50i-a64-emac", EMAC_A64 }, { NULL, 0 } }; struct awg_bufmap { bus_dmamap_t map; struct mbuf *mbuf; }; struct awg_txring { bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; struct emac_desc *desc_ring; bus_addr_t desc_ring_paddr; bus_dma_tag_t buf_tag; struct awg_bufmap buf_map[TX_DESC_COUNT]; u_int cur, next, queued; u_int segs; }; struct awg_rxring { bus_dma_tag_t desc_tag; 
bus_dmamap_t desc_map; struct emac_desc *desc_ring; bus_addr_t desc_ring_paddr; bus_dma_tag_t buf_tag; struct awg_bufmap buf_map[RX_DESC_COUNT]; bus_dmamap_t buf_spare_map; u_int cur; }; enum { _RES_EMAC, _RES_IRQ, _RES_SYSCON, _RES_NITEMS }; struct awg_softc { struct resource *res[_RES_NITEMS]; struct mtx mtx; if_t ifp; device_t dev; device_t miibus; struct callout stat_ch; void *ih; u_int mdc_div_ratio_m; int link; int if_flags; enum awg_type type; struct syscon *syscon; struct awg_txring tx; struct awg_rxring rx; }; static struct resource_spec awg_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; static void awg_txeof(struct awg_softc *sc); static void awg_start_locked(struct awg_softc *sc); static void awg_tick(void *softc); static int awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay); static uint32_t syscon_read_emac_clk_reg(device_t dev); static void syscon_write_emac_clk_reg(device_t dev, uint32_t val); static phandle_t awg_get_phy_node(device_t dev); static bool awg_has_internal_phy(device_t dev); /* * MII functions */ static int awg_miibus_readreg(device_t dev, int phy, int reg) { struct awg_softc *sc; int retry, val; sc = device_get_softc(dev); val = 0; WR4(sc, EMAC_MII_CMD, (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | (phy << PHY_ADDR_SHIFT) | (reg << PHY_REG_ADDR_SHIFT) | MII_BUSY); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { val = RD4(sc, EMAC_MII_DATA); break; } DELAY(10); } if (retry == 0) device_printf(dev, "phy read timeout, phy=%d reg=%d\n", phy, reg); return (val); } static int awg_miibus_writereg(device_t dev, int phy, int reg, int val) { struct awg_softc *sc; int retry; sc = device_get_softc(dev); WR4(sc, EMAC_MII_DATA, val); WR4(sc, EMAC_MII_CMD, (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | (phy << PHY_ADDR_SHIFT) | (reg << PHY_REG_ADDR_SHIFT) | MII_WR | MII_BUSY); for (retry = 
MII_BUSY_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) break; DELAY(10); } if (retry == 0) device_printf(dev, "phy write timeout, phy=%d reg=%d\n", phy, reg); return (0); } static void awg_miibus_statchg(device_t dev) { struct awg_softc *sc; struct mii_data *mii; uint32_t val; sc = device_get_softc(dev); AWG_ASSERT_LOCKED(sc); if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: case IFM_100_TX: case IFM_10_T: sc->link = 1; break; default: sc->link = 0; break; } } else sc->link = 0; if (sc->link == 0) return; val = RD4(sc, EMAC_BASIC_CTL_0); val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; else val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) val |= BASIC_CTL_DUPLEX; WR4(sc, EMAC_BASIC_CTL_0, val); val = RD4(sc, EMAC_RX_CTL_0); val &= ~RX_FLOW_CTL_EN; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= RX_FLOW_CTL_EN; WR4(sc, EMAC_RX_CTL_0, val); val = RD4(sc, EMAC_TX_FLOW_CTL); val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= TX_FLOW_CTL_EN; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) val |= awg_pause_time << PAUSE_TIME_SHIFT; WR4(sc, EMAC_TX_FLOW_CTL, val); } /* * Media functions */ static void awg_media_status(if_t ifp, struct ifmediareq *ifmr) { struct awg_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); AWG_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = 
mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; AWG_UNLOCK(sc); } static int awg_media_change(if_t ifp) { struct awg_softc *sc; struct mii_data *mii; int error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); AWG_LOCK(sc); error = mii_mediachg(mii); AWG_UNLOCK(sc); return (error); } /* * Core functions */ /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */ static uint32_t bitrev32(uint32_t x) { x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); return (x >> 16) | (x << 16); } static u_int awg_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, hashreg, hashbit, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7f; crc = bitrev32(~crc) >> 26; hashreg = (crc >> 5); hashbit = (crc & 0x1f); hash[hashreg] |= (1 << hashbit); return (1); } static void awg_setup_rxfilter(struct awg_softc *sc) { uint32_t val, hash[2], machi, maclo; uint8_t *eaddr; if_t ifp; AWG_ASSERT_LOCKED(sc); ifp = sc->ifp; val = 0; hash[0] = hash[1] = 0; if (if_getflags(ifp) & IFF_PROMISC) val |= DIS_ADDR_FILTER; else if (if_getflags(ifp) & IFF_ALLMULTI) { val |= RX_ALL_MULTICAST; hash[0] = hash[1] = ~0; } else if (if_foreach_llmaddr(ifp, awg_hash_maddr, hash) > 0) val |= HASH_MULTICAST; /* Write our unicast address */ eaddr = if_getlladdr(ifp); machi = (eaddr[5] << 8) | eaddr[4]; maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) | (eaddr[0] << 0); WR4(sc, EMAC_ADDR_HIGH(0), machi); WR4(sc, EMAC_ADDR_LOW(0), maclo); /* Multicast hash filters */ WR4(sc, EMAC_RX_HASH_0, hash[1]); WR4(sc, EMAC_RX_HASH_1, hash[0]); /* RX frame filter config */ WR4(sc, EMAC_RX_FRM_FLT, val); } static void awg_setup_core(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Configure DMA burst length and priorities */ val = awg_burst_len << 
BASIC_CTL_BURST_LEN_SHIFT; if (awg_rx_tx_pri) val |= BASIC_CTL_RX_TX_PRI; WR4(sc, EMAC_BASIC_CTL_1, val); } static void awg_enable_mac(struct awg_softc *sc, bool enable) { uint32_t tx, rx; AWG_ASSERT_LOCKED(sc); tx = RD4(sc, EMAC_TX_CTL_0); rx = RD4(sc, EMAC_RX_CTL_0); if (enable) { tx |= TX_EN; rx |= RX_EN | CHECK_CRC; } else { tx &= ~TX_EN; rx &= ~(RX_EN | CHECK_CRC); } WR4(sc, EMAC_TX_CTL_0, tx); WR4(sc, EMAC_RX_CTL_0, rx); } static void awg_get_eaddr(device_t dev, uint8_t *eaddr) { struct awg_softc *sc; uint32_t maclo, machi, rnd; u_char rootkey[16]; uint32_t rootkey_size; sc = device_get_softc(dev); machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; maclo = RD4(sc, EMAC_ADDR_LOW(0)); rootkey_size = sizeof(rootkey); if (maclo == 0xffffffff && machi == 0xffff) { /* MAC address in hardware is invalid, create one */ if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey, &rootkey_size) == 0 && (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | rootkey[15]) != 0) { /* MAC address is derived from the root key in SID */ maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | (rootkey[3] << 8) | 0x02; machi = (rootkey[15] << 8) | rootkey[14]; } else { /* Create one */ rnd = arc4random(); maclo = 0x00f2 | (rnd & 0xffff0000); machi = rnd & 0xffff; } } eaddr[0] = maclo & 0xff; eaddr[1] = (maclo >> 8) & 0xff; eaddr[2] = (maclo >> 16) & 0xff; eaddr[3] = (maclo >> 24) & 0xff; eaddr[4] = machi & 0xff; eaddr[5] = (machi >> 8) & 0xff; } /* * DMA functions */ static void awg_enable_dma_intr(struct awg_softc *sc) { /* Enable interrupts */ WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN); } static void awg_disable_dma_intr(struct awg_softc *sc) { /* Disable interrupts */ WR4(sc, EMAC_INT_EN, 0); } static void awg_init_dma(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Enable interrupts */ #ifdef DEVICE_POLLING if ((if_getcapenable(sc->ifp) & IFCAP_POLLING) == 0) awg_enable_dma_intr(sc); else awg_disable_dma_intr(sc); #else awg_enable_dma_intr(sc); #endif /* 
Enable transmit DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME); /* Enable receive DMA */ val = RD4(sc, EMAC_RX_CTL_1); WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD); } static void awg_stop_dma(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Stop transmit DMA and flush data in the TX FIFO */ val = RD4(sc, EMAC_TX_CTL_1); val &= ~TX_DMA_EN; val |= FLUSH_TX_FIFO; WR4(sc, EMAC_TX_CTL_1, val); /* Disable interrupts */ awg_disable_dma_intr(sc); /* Disable transmit DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN); /* Disable receive DMA */ val = RD4(sc, EMAC_RX_CTL_1); WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN); } static int awg_encap(struct awg_softc *sc, struct mbuf **mp) { bus_dmamap_t map; bus_dma_segment_t segs[TX_MAX_SEGS]; int error, nsegs, cur, first, last, i; u_int csum_flags; uint32_t flags, status; struct mbuf *m; cur = first = sc->tx.cur; map = sc->tx.buf_map[first].map; m = *mp; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS); if (m == NULL) { device_printf(sc->dev, "awg_encap: m_collapse failed\n"); m_freem(*mp); *mp = NULL; return (ENOMEM); } *mp = m; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*mp); *mp = NULL; } } if (error != 0) { device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n"); return (error); } if (nsegs == 0) { m_freem(*mp); *mp = NULL; return (EIO); } if (sc->tx.queued + nsegs > TX_DESC_COUNT) { bus_dmamap_unload(sc->tx.buf_tag, map); return (ENOBUFS); } bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE); flags = TX_FIR_DESC; status = 0; if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) { if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) csum_flags = TX_CHECKSUM_CTL_FULL; else csum_flags = TX_CHECKSUM_CTL_IP; flags |= (csum_flags << 
TX_CHECKSUM_CTL_SHIFT); } for (i = 0; i < nsegs; i++) { sc->tx.segs++; if (i == nsegs - 1) { flags |= TX_LAST_DESC; /* * Can only request TX completion * interrupt on last descriptor. */ if (sc->tx.segs >= awg_tx_interval) { sc->tx.segs = 0; flags |= TX_INT_CTL; } } sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr); sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len); sc->tx.desc_ring[cur].status = htole32(status); flags &= ~TX_FIR_DESC; /* * Setting of the valid bit in the first descriptor is * deferred until the whole chain is fully set up. */ status = TX_DESC_CTL; ++sc->tx.queued; cur = TX_NEXT(cur); } sc->tx.cur = cur; /* Store mapping and mbuf in the last segment */ last = TX_SKIP(cur, TX_DESC_COUNT - 1); sc->tx.buf_map[first].map = sc->tx.buf_map[last].map; sc->tx.buf_map[last].map = map; sc->tx.buf_map[last].mbuf = m; /* * The whole mbuf chain has been DMA mapped, * fix the first descriptor. */ sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL); return (0); } static void awg_clean_txbuf(struct awg_softc *sc, int index) { struct awg_bufmap *bmap; --sc->tx.queued; bmap = &sc->tx.buf_map[index]; if (bmap->mbuf != NULL) { bus_dmamap_sync(sc->tx.buf_tag, bmap->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx.buf_tag, bmap->map); m_freem(bmap->mbuf); bmap->mbuf = NULL; } } static void awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr) { uint32_t status, size; status = RX_DESC_CTL; size = MCLBYTES - 1; sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr); sc->rx.desc_ring[index].size = htole32(size); sc->rx.desc_ring[index].status = htole32(status); } static void awg_reuse_rxdesc(struct awg_softc *sc, int index) { sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL); } static int awg_newbuf_rx(struct awg_softc *sc, int index) { struct mbuf *m; bus_dma_segment_t seg; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = 
m->m_ext.ext_size; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map, m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } if (sc->rx.buf_map[index].mbuf != NULL) { bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map); } map = sc->rx.buf_map[index].map; sc->rx.buf_map[index].map = sc->rx.buf_spare_map; sc->rx.buf_spare_map = map; bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, BUS_DMASYNC_PREREAD); sc->rx.buf_map[index].mbuf = m; awg_setup_rxdesc(sc, index, seg.ds_addr); return (0); } static void awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static int awg_setup_dma(device_t dev) { struct awg_softc *sc; int error, i; sc = device_get_softc(dev); /* Setup TX ring */ error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ DESC_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TX_DESC_SIZE, 1, /* maxsize, nsegs */ TX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx.desc_tag); if (error != 0) { device_printf(dev, "cannot create TX descriptor ring tag\n"); return (error); } error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map); if (error != 0) { device_printf(dev, "cannot allocate TX descriptor ring\n"); return (error); } error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb, &sc->tx.desc_ring_paddr, 0); if (error != 0) { device_printf(dev, "cannot load TX descriptor ring\n"); return (error); } for (i = 0; i < TX_DESC_COUNT; i++) sc->tx.desc_ring[i].next = htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); error = bus_dma_tag_create( 
bus_get_dma_tag(dev), /* Parent tag */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx.buf_tag); if (error != 0) { device_printf(dev, "cannot create TX buffer tag\n"); return (error); } sc->tx.queued = 0; for (i = 0; i < TX_DESC_COUNT; i++) { error = bus_dmamap_create(sc->tx.buf_tag, 0, &sc->tx.buf_map[i].map); if (error != 0) { device_printf(dev, "cannot create TX buffer map\n"); return (error); } } /* Setup RX ring */ error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ DESC_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RX_DESC_SIZE, 1, /* maxsize, nsegs */ RX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rx.desc_tag); if (error != 0) { device_printf(dev, "cannot create RX descriptor ring tag\n"); return (error); } error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map); if (error != 0) { device_printf(dev, "cannot allocate RX descriptor ring\n"); return (error); } error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb, &sc->rx.desc_ring_paddr, 0); if (error != 0) { device_printf(dev, "cannot load RX descriptor ring\n"); return (error); } error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rx.buf_tag); if (error != 0) { device_printf(dev, "cannot create RX buffer tag\n"); return (error); } error = 
bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map); if (error != 0) { device_printf(dev, "cannot create RX buffer spare map\n"); return (error); } for (i = 0; i < RX_DESC_COUNT; i++) { sc->rx.desc_ring[i].next = htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i))); error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_map[i].map); if (error != 0) { device_printf(dev, "cannot create RX buffer map\n"); return (error); } sc->rx.buf_map[i].mbuf = NULL; error = awg_newbuf_rx(sc, i); if (error != 0) { device_printf(dev, "cannot create RX buffer\n"); return (error); } } bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); /* Write transmit and receive descriptor base address registers */ WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); return (0); } static void awg_dma_start_tx(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Start and run TX DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START); } /* * if_ functions */ static void awg_start_locked(struct awg_softc *sc) { struct mbuf *m; if_t ifp; int cnt, err; AWG_ASSERT_LOCKED(sc); if (!sc->link) return; ifp = sc->ifp; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; for (cnt = 0; ; cnt++) { m = if_dequeue(ifp); if (m == NULL) break; err = awg_encap(sc, &m); if (err != 0) { if (err == ENOBUFS) if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if (m != NULL) if_sendq_prepend(ifp, m); break; } bpf_mtap_if(ifp, m); } if (cnt != 0) { bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); awg_dma_start_tx(sc); } } static void awg_start(if_t ifp) { struct awg_softc *sc; sc = if_getsoftc(ifp); AWG_LOCK(sc); awg_start_locked(sc); AWG_UNLOCK(sc); } static void awg_init_locked(struct awg_softc *sc) { struct mii_data *mii; if_t ifp; mii = device_get_softc(sc->miibus); ifp = sc->ifp; AWG_ASSERT_LOCKED(sc); if (if_getdrvflags(ifp) & 
IFF_DRV_RUNNING) return; awg_setup_rxfilter(sc); awg_setup_core(sc); awg_enable_mac(sc, true); awg_init_dma(sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); mii_mediachg(mii); callout_reset(&sc->stat_ch, hz, awg_tick, sc); } static void awg_init(void *softc) { struct awg_softc *sc; sc = softc; AWG_LOCK(sc); awg_init_locked(sc); AWG_UNLOCK(sc); } static void awg_stop(struct awg_softc *sc) { if_t ifp; uint32_t val; int i; AWG_ASSERT_LOCKED(sc); ifp = sc->ifp; callout_stop(&sc->stat_ch); awg_stop_dma(sc); awg_enable_mac(sc, false); sc->link = 0; /* Finish handling transmitted buffers */ awg_txeof(sc); /* Release any untransmitted buffers. */ for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) { val = le32toh(sc->tx.desc_ring[i].status); if ((val & TX_DESC_CTL) != 0) break; awg_clean_txbuf(sc, i); } sc->tx.next = i; for (; sc->tx.queued > 0; i = TX_NEXT(i)) { sc->tx.desc_ring[i].status = 0; awg_clean_txbuf(sc, i); } sc->tx.cur = sc->tx.next; bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Setup RX buffers for reuse */ bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rx.cur; ; i = RX_NEXT(i)) { val = le32toh(sc->rx.desc_ring[i].status); if ((val & RX_DESC_CTL) != 0) break; awg_reuse_rxdesc(sc, i); } sc->rx.cur = i; bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static int awg_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct awg_softc *sc; struct mii_data *mii; struct ifreq *ifr; int flags, mask, error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFFLAGS: AWG_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->if_flags; if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) 
awg_setup_rxfilter(sc); } else awg_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) awg_stop(sc); } sc->if_flags = if_getflags(ifp); AWG_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { AWG_LOCK(sc); awg_setup_rxfilter(sc); AWG_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(awg_poll, ifp); if (error != 0) break; AWG_LOCK(sc); awg_disable_dma_intr(sc); if_setcapenablebit(ifp, IFCAP_POLLING, 0); AWG_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); AWG_LOCK(sc); awg_enable_dma_intr(sc); if_setcapenablebit(ifp, 0, IFCAP_POLLING); AWG_UNLOCK(sc); } } #endif if (mask & IFCAP_VLAN_MTU) if_togglecapenable(ifp, IFCAP_VLAN_MTU); if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0); else if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * Interrupts functions */ static int awg_rxintr(struct awg_softc *sc) { if_t ifp; struct mbuf *m, *mh, *mt; int error, index, len, cnt, npkt; uint32_t status; ifp = sc->ifp; mh = mt = NULL; cnt = 0; npkt = 0; bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (index = sc->rx.cur; ; index = RX_NEXT(index)) { status = le32toh(sc->rx.desc_ring[index].status); if ((status & RX_DESC_CTL) != 0) break; len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT; if (len == 0) { if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); awg_reuse_rxdesc(sc, index); 
continue;
		}

		/*
		 * Detach the filled mbuf and immediately replace it so the
		 * ring slot always owns a buffer; on allocation failure the
		 * old descriptor is recycled and the frame is counted as an
		 * input queue drop.
		 */
		m = sc->rx.buf_map[index].mbuf;
		error = awg_newbuf_rx(sc, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* Translate hardware checksum status bits into mbuf flags. */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (status & RX_FRM_TYPE) != 0) {
			m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			if ((status & RX_HEADER_ERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & RX_PAYLOAD_ERR) == 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * Chain the frame onto the batch list; once awg_rx_batch
		 * frames are queued, hand the whole chain to the stack with
		 * the driver lock dropped.
		 */
		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		if (cnt == awg_rx_batch) {
			AWG_UNLOCK(sc);
			if_input(ifp, mh);
			AWG_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Pass up whatever remains of the last partial batch. */
	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}

/*
 * Reclaim completed TX descriptors: walk the ring from tx.next until a
 * descriptor still owned by the hardware (TX_DESC_CTL set) is found,
 * counting output errors/packets on last-segment descriptors and freeing
 * each slot via awg_clean_txbuf().  Clears OACTIVE if progress was made.
 */
static void
awg_txeof(struct awg_softc *sc)
{
	struct emac_desc *desc;
	uint32_t status, size;
	if_t ifp;
	int i, prog;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;
	prog = 0;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		if ((status & TX_DESC_CTL) != 0)
			break;	/* still owned by the hardware */
		size = le32toh(desc->size);
		if (size & TX_LAST_DESC) {
			if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		prog++;
		awg_clean_txbuf(sc, i);
	}

	if (prog > 0) {
		sc->tx.next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
}

/*
 * Interrupt handler: acknowledge pending causes by writing EMAC_INT_STA
 * back to itself, then service RX and TX completions and restart
 * transmission if frames are queued.
 */
static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & TX_INT)
awg_txeof(sc); if (val & (TX_INT | TX_BUF_UA_INT)) { if (!if_sendq_empty(sc->ifp)) awg_start_locked(sc); } AWG_UNLOCK(sc); } #ifdef DEVICE_POLLING static int awg_poll(if_t ifp, enum poll_cmd cmd, int count) { struct awg_softc *sc; uint32_t val; int rx_npkts; sc = if_getsoftc(ifp); rx_npkts = 0; AWG_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { AWG_UNLOCK(sc); return (0); } rx_npkts = awg_rxintr(sc); awg_txeof(sc); if (!if_sendq_empty(ifp)) awg_start_locked(sc); if (cmd == POLL_AND_CHECK_STATUS) { val = RD4(sc, EMAC_INT_STA); if (val != 0) WR4(sc, EMAC_INT_STA, val); } AWG_UNLOCK(sc); return (rx_npkts); } #endif /* * syscon functions */ static uint32_t syscon_read_emac_clk_reg(device_t dev) { struct awg_softc *sc; sc = device_get_softc(dev); if (sc->syscon != NULL) return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG)); else if (sc->res[_RES_SYSCON] != NULL) return (bus_read_4(sc->res[_RES_SYSCON], 0)); return (0); } static void syscon_write_emac_clk_reg(device_t dev, uint32_t val) { struct awg_softc *sc; sc = device_get_softc(dev); if (sc->syscon != NULL) SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val); else if (sc->res[_RES_SYSCON] != NULL) bus_write_4(sc->res[_RES_SYSCON], 0, val); } /* * PHY functions */ static phandle_t awg_get_phy_node(device_t dev) { phandle_t node; pcell_t phy_handle; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) <= 0) return (0); return (OF_node_from_xref(phy_handle)); } static bool awg_has_internal_phy(device_t dev) { phandle_t node, phy_node; node = ofw_bus_get_node(dev); /* Legacy binding */ if (OF_hasprop(node, "allwinner,use-internal-phy")) return (true); phy_node = awg_get_phy_node(dev); return (phy_node != 0 && ofw_bus_node_is_compatible(OF_parent(phy_node), "allwinner,sun8i-h3-mdio-internal") != 0); } static int awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay) { phandle_t node; uint32_t delay; if (tx_delay == NULL || rx_delay == NULL) return 
(EINVAL);
	*tx_delay = *rx_delay = 0;

	node = ofw_bus_get_node(dev);
	/*
	 * "tx-delay" is in raw register units (range-checked to 0..7 below);
	 * the "allwinner,tx-delay-ps" binding is in picoseconds and converts
	 * at 100 ps per register step.
	 */
	if (OF_getencprop(node, "tx-delay", &delay, sizeof(delay)) >= 0)
		*tx_delay = delay;
	else if (OF_getencprop(node, "allwinner,tx-delay-ps", &delay,
	    sizeof(delay)) >= 0) {
		if ((delay % 100) != 0) {
			device_printf(dev,
			    "tx-delay-ps is not a multiple of 100\n");
			return (EDOM);
		}
		*tx_delay = delay / 100;
	}
	if (*tx_delay > 7) {
		device_printf(dev, "tx-delay out of range\n");
		return (ERANGE);
	}

	/* Same scheme for the RX delay, with a wider 0..31 register range. */
	if (OF_getencprop(node, "rx-delay", &delay, sizeof(delay)) >= 0)
		*rx_delay = delay;
	else if (OF_getencprop(node, "allwinner,rx-delay-ps", &delay,
	    sizeof(delay)) >= 0) {
		if ((delay % 100) != 0) {
			/*
			 * NOTE(review): wording differs from the tx-delay-ps
			 * case ("is not a multiple of 100"); consider making
			 * the two messages consistent.
			 */
			device_printf(dev,
			    "rx-delay-ps is not within documented domain\n");
			return (EDOM);
		}
		*rx_delay = delay / 100;
	}
	if (*rx_delay > 31) {
		device_printf(dev, "rx-delay out of range\n");
		return (ERANGE);
	}

	return (0);
}

/*
 * Configure the PHY clocking path according to the "phy-mode" property,
 * either through the syscon EMAC clock register (modern bindings and the
 * legacy pine64 in-node layout) or through the "tx" clock mux.
 * Returns 0 on success (including when no "phy-mode" is present) or an
 * errno on failure.
 */
static int
awg_setup_phy(device_t dev)
{
	struct awg_softc *sc;
	clk_t clk_tx, clk_tx_parent;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint32_t reg, tx_delay, rx_delay;
	int error;
	bool use_syscon;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	use_syscon = false;

	/* No phy-mode property: nothing to configure. */
	if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_type) == 0)
		return (0);

	if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL)
		use_syscon = true;

	if (bootverbose)
		device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type,
		    use_syscon ? "reg" : "clk");

	if (use_syscon) {
		/*
		 * Abstract away writing to syscon for devices like the pine64.
		 * For the pine64, we get dtb from U-Boot and it still uses the
		 * legacy setup of specifying syscon register in emac node
		 * rather than as its own node and using an xref in emac.
		 * These abstractions can go away once U-Boot dts is up-to-date.
*/ reg = syscon_read_emac_clk_reg(dev); reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN); if (strncmp(phy_type, "rgmii", 5) == 0) reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII; else if (strcmp(phy_type, "rmii") == 0) reg |= EMAC_CLK_RMII_EN; else reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII; /* * Fail attach if we fail to parse either of the delay * parameters. If we don't have the proper delay to write to * syscon, then awg likely won't function properly anyways. * Lack of delay is not an error! */ error = awg_parse_delay(dev, &tx_delay, &rx_delay); if (error != 0) goto fail; /* Default to 0 and we'll increase it if we need to. */ reg &= ~(EMAC_CLK_ETXDC | EMAC_CLK_ERXDC); if (tx_delay > 0) reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT); if (rx_delay > 0) reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT); if (sc->type == EMAC_H3) { if (awg_has_internal_phy(dev)) { reg |= EMAC_CLK_EPHY_SELECT; reg &= ~EMAC_CLK_EPHY_SHUTDOWN; if (OF_hasprop(node, "allwinner,leds-active-low")) reg |= EMAC_CLK_EPHY_LED_POL; else reg &= ~EMAC_CLK_EPHY_LED_POL; /* Set internal PHY addr to 1 */ reg &= ~EMAC_CLK_EPHY_ADDR; reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT); } else { reg &= ~EMAC_CLK_EPHY_SELECT; } } if (bootverbose) device_printf(dev, "EMAC clock: 0x%08x\n", reg); syscon_write_emac_clk_reg(dev, reg); } else { if (strncmp(phy_type, "rgmii", 5) == 0) tx_parent_name = "emac_int_tx"; else tx_parent_name = "mii_phy_tx"; /* Get the TX clock */ error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx); if (error != 0) { device_printf(dev, "cannot get tx clock\n"); goto fail; } /* Find the desired parent clock based on phy-mode property */ error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent); if (error != 0) { device_printf(dev, "cannot get clock '%s'\n", tx_parent_name); goto fail; } /* Set TX clock parent */ error = clk_set_parent_by_clk(clk_tx, clk_tx_parent); if (error != 0) { device_printf(dev, "cannot set tx clock parent\n"); goto fail; } /* Enable TX clock */ error = clk_enable(clk_tx); if 
(error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;
fail:
	OF_prop_free(phy_type);
	return (error);
}

/*
 * Acquire and enable the external resources the MAC depends on: the AHB
 * clock and reset (tried under both the "stmmaceth" and legacy "ahb"
 * names), the optional EPHY clock/reset, an optional syscon handle and
 * PHY regulator, then derive the MDC clock divider from the AHB
 * frequency.  On failure every acquired resource is released.
 */
static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	phandle_t node, phy_node;
	hwreset_t rst_ahb, rst_ephy;
	clk_t clk_ahb, clk_ephy;
	regulator_t reg;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	rst_ahb = rst_ephy = NULL;
	clk_ahb = clk_ephy = NULL;
	reg = NULL;
	node = ofw_bus_get_node(dev);
	phy_node = awg_get_phy_node(dev);

	if (phy_node == 0 && OF_hasprop(node, "phy-handle")) {
		error = ENXIO;
		device_printf(dev, "cannot get phy handle\n");
		goto fail;
	}

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_ahb);
	if (error != 0)
		error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	/* The EPHY reset is optional; absence is not an error. */
	if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
		if (phy_node == 0 || hwreset_get_by_ofw_idx(dev, phy_node, 0,
		    &rst_ephy) != 0)
			rst_ephy = NULL;
	error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk_ahb);
	if (error != 0)
		error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	/* The EPHY clock is likewise optional. */
	if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
		if (phy_node == 0 || clk_get_by_ofw_index(dev, phy_node, 0,
		    &clk_ephy) != 0)
			clk_ephy = NULL;

	/*
	 * NOTE(review): on this failure path (and the awg_setup_phy() one
	 * below) "error" still holds 0 from the successful clk_get above, so
	 * the function can return success after printing the failure message;
	 * it should set error (e.g. ENXIO) before the goto — verify and fix.
	 */
	if (OF_hasprop(node, "syscon") && syscon_get_by_ofw_property(dev, node,
	    "syscon", &sc->syscon) != 0) {
		device_printf(dev, "cannot get syscon driver handle\n");
		goto fail;
	}

	/* Configure PHY for MII or RGMII mode */
	if (awg_setup_phy(dev) != 0)
		goto fail;

	/* Enable clocks */
	error = clk_enable(clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	if (clk_ephy != NULL) {
		error = clk_enable(clk_ephy);
		if (error != 0) {
			device_printf(dev, "cannot enable ephy clock\n");
			goto fail;
		}
	}

	/* De-assert reset */
	error = hwreset_deassert(rst_ahb);
	if (error != 0) {
device_printf(dev, "cannot de-assert ahb reset\n"); goto fail; } if (rst_ephy != NULL) { /* * The ephy reset is left de-asserted by U-Boot. Assert it * here to make sure that we're in a known good state going * into the PHY reset. */ hwreset_assert(rst_ephy); error = hwreset_deassert(rst_ephy); if (error != 0) { device_printf(dev, "cannot de-assert ephy reset\n"); goto fail; } } /* Enable PHY regulator if applicable */ if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) { error = regulator_enable(reg); if (error != 0) { device_printf(dev, "cannot enable PHY regulator\n"); goto fail; } } /* Determine MDC clock divide ratio based on AHB clock */ error = clk_get_freq(clk_ahb, &freq); if (error != 0) { device_printf(dev, "cannot get AHB clock frequency\n"); goto fail; } div = freq / MDIO_FREQ; if (div <= 16) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; else if (div <= 32) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; else if (div <= 64) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; else if (div <= 128) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; else { device_printf(dev, "cannot determine MDC clock divide ratio\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n", (uintmax_t)freq, sc->mdc_div_ratio_m); return (0); fail: if (reg != NULL) regulator_release(reg); if (clk_ephy != NULL) clk_release(clk_ephy); if (clk_ahb != NULL) clk_release(clk_ahb); if (rst_ephy != NULL) hwreset_release(rst_ephy); if (rst_ahb != NULL) hwreset_release(rst_ahb); return (error); } #ifdef AWG_DEBUG static void awg_dump_regs(device_t dev) { static const struct { const char *name; u_int reg; } regs[] = { { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, { "INT_STA", EMAC_INT_STA }, { "INT_EN", EMAC_INT_EN }, { "TX_CTL_0", EMAC_TX_CTL_0 }, { "TX_CTL_1", EMAC_TX_CTL_1 }, { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, { "RX_CTL_0", EMAC_RX_CTL_0 }, { "RX_CTL_1", EMAC_RX_CTL_1 }, { 
"RX_DMA_LIST", EMAC_RX_DMA_LIST }, { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, { "RX_HASH_0", EMAC_RX_HASH_0 }, { "RX_HASH_1", EMAC_RX_HASH_1 }, { "MII_CMD", EMAC_MII_CMD }, { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, { "TX_DMA_STA", EMAC_TX_DMA_STA }, { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, { "RX_DMA_STA", EMAC_RX_DMA_STA }, { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, { "RGMII_STA", EMAC_RGMII_STA }, }; struct awg_softc *sc; unsigned int n; sc = device_get_softc(dev); for (n = 0; n < nitems(regs); n++) device_printf(dev, " %-20s %08x\n", regs[n].name, RD4(sc, regs[n].reg)); } #endif #define GPIO_ACTIVE_LOW 1 static int awg_phy_reset(device_t dev) { pcell_t gpio_prop[4], delay_prop[3]; phandle_t node, gpio_node; device_t gpio; uint32_t pin, flags; uint32_t pin_value; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop, sizeof(gpio_prop)) <= 0) return (0); if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop, sizeof(delay_prop)) <= 0) return (ENXIO); gpio_node = OF_node_from_xref(gpio_prop[0]); if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) return (ENXIO); if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1, gpio_prop + 1, &pin, &flags) != 0) return (ENXIO); pin_value = GPIO_PIN_LOW; if (OF_hasprop(node, "allwinner,reset-active-low")) pin_value = GPIO_PIN_HIGH; if (flags & GPIO_ACTIVE_LOW) pin_value = !pin_value; GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[0]); GPIO_PIN_SET(gpio, pin, !pin_value); DELAY(delay_prop[1]); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[2]); return (0); } static int awg_reset(device_t dev) { struct awg_softc *sc; int retry; sc = device_get_softc(dev); /* Reset PHY if necessary */ if (awg_phy_reset(dev) != 0) { device_printf(dev, "failed to reset PHY\n"); return (ENXIO); } /* Soft reset all registers 
and logic */ WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); /* Wait for soft reset bit to self-clear */ for (retry = SOFT_RST_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) break; DELAY(10); } if (retry == 0) { device_printf(dev, "soft reset timed out\n"); #ifdef AWG_DEBUG awg_dump_regs(dev); #endif return (ETIMEDOUT); } return (0); } /* * Stats */ static void awg_tick(void *softc) { struct awg_softc *sc; struct mii_data *mii; if_t ifp; int link; sc = softc; ifp = sc->ifp; mii = device_get_softc(sc->miibus); AWG_ASSERT_LOCKED(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; link = sc->link; mii_tick(mii); if (sc->link && !link) awg_start_locked(sc); callout_reset(&sc->stat_ch, hz, awg_tick, sc); } /* * Probe/attach functions */ static int awg_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Gigabit Ethernet"); return (BUS_PROBE_DEFAULT); } static int awg_attach(device_t dev) { uint8_t eaddr[ETHER_ADDR_LEN]; struct awg_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); /* Setup clocks and regulators */ error = awg_setup_extres(dev); if (error != 0) return (error); /* Read MAC address before resetting the chip */ awg_get_eaddr(dev, eaddr); /* Soft reset EMAC core */ error = awg_reset(dev); if (error != 0) return (error); /* Setup DMA descriptors */ error = awg_setup_dma(dev); if (error != 0) return (error); /* Install interrupt handler */ error = bus_setup_intr(dev, sc->res[_RES_IRQ], INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih); if (error != 
0) { device_printf(dev, "cannot setup interrupt handler\n"); return (error); } /* Setup ethernet interface */ sc->ifp = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp, sc); if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(sc->ifp, awg_start); if_setioctlfn(sc->ifp, awg_ioctl); if_setinitfn(sc->ifp, awg_init); if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); if_setsendqready(sc->ifp); if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0); #endif /* Attach MII driver */ error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change, awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "cannot attach PHY\n"); return (error); } /* Attach ethernet interface */ ether_ifattach(sc->ifp, eaddr); return (0); } static device_method_t awg_methods[] = { /* Device interface */ DEVMETHOD(device_probe, awg_probe), DEVMETHOD(device_attach, awg_attach), /* MII interface */ DEVMETHOD(miibus_readreg, awg_miibus_readreg), DEVMETHOD(miibus_writereg, awg_miibus_writereg), DEVMETHOD(miibus_statchg, awg_miibus_statchg), DEVMETHOD_END }; static driver_t awg_driver = { "awg", awg_methods, sizeof(struct awg_softc), }; DRIVER_MODULE(awg, simplebus, awg_driver, 0, 0); DRIVER_MODULE(miibus, awg, miibus_driver, 0, 0); MODULE_DEPEND(awg, ether, 1, 1, 1); MODULE_DEPEND(awg, miibus, 1, 1, 1); MODULE_DEPEND(awg, aw_sid, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/allwinner/if_emac.c b/sys/arm/allwinner/if_emac.c index c704e7830cbc..f581d361d3d9 100644 --- a/sys/arm/allwinner/if_emac.c +++ b/sys/arm/allwinner/if_emac.c @@ -1,1195 +1,1195 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Ganbold Tsagaankhuu * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ /* A10/A20 EMAC driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #include -#include +#include #include "miibus_if.h" #include "gpio_if.h" #include "a10_sramc.h" struct emac_softc { if_t emac_ifp; device_t emac_dev; device_t emac_miibus; bus_space_handle_t emac_handle; bus_space_tag_t emac_tag; struct resource *emac_res; struct resource *emac_irq; void *emac_intrhand; clk_t emac_clk; int emac_if_flags; struct mtx emac_mtx; struct callout emac_tick_ch; int emac_watchdog_timer; int emac_rx_process_limit; int emac_link; uint32_t emac_fifo_mask; }; static int emac_probe(device_t); static int emac_attach(device_t); static int emac_detach(device_t); static int emac_shutdown(device_t); static int emac_suspend(device_t); static int emac_resume(device_t); static int emac_sys_setup(struct emac_softc *); static void emac_reset(struct emac_softc *); static void emac_init_locked(struct emac_softc *); static void emac_start_locked(if_t); static void emac_init(void *); static void emac_stop_locked(struct emac_softc *); static void emac_intr(void *); static int emac_ioctl(if_t, u_long, caddr_t); static void emac_rxeof(struct emac_softc *, int); static void emac_txeof(struct emac_softc *, uint32_t); static int emac_miibus_readreg(device_t, int, int); static int emac_miibus_writereg(device_t, int, int, int); static void emac_miibus_statchg(device_t); static int emac_ifmedia_upd(if_t); static void emac_ifmedia_sts(if_t, struct ifmediareq *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS); #define EMAC_READ_REG(sc, reg) \ bus_space_read_4(sc->emac_tag, sc->emac_handle, reg) #define EMAC_WRITE_REG(sc, reg, val) \ 
bus_space_write_4(sc->emac_tag, sc->emac_handle, reg, val)

/*
 * Enable the EMAC input clock and map the A10 internal SRAM to the EMAC.
 * Returns 0 on success or an errno from the clock framework.
 */
static int
emac_sys_setup(struct emac_softc *sc)
{
	int error;

	/* Activate EMAC clock. */
	error = clk_get_by_ofw_index(sc->emac_dev, 0, 0, &sc->emac_clk);
	if (error != 0) {
		device_printf(sc->emac_dev, "cannot get clock\n");
		return (error);
	}
	error = clk_enable(sc->emac_clk);
	if (error != 0) {
		device_printf(sc->emac_dev, "cannot enable clock\n");
		return (error);
	}

	/* Map sram. */
	a10_map_to_emac();

	return (0);
}

/* Determine the MAC address to use; the policy is described below. */
static void
emac_get_hwaddr(struct emac_softc *sc, uint8_t *hwaddr)
{
	uint32_t val0, val1, rnd;
	u_char rootkey[16];
	size_t rootkey_size;

	/*
	 * Try to get MAC address from running hardware.
	 * If there is something non-zero there just use it.
	 *
	 * Otherwise set the address to a convenient locally assigned address,
	 * using the SID rootkey.
	 * This is what U-Boot does, so we end up with the same MAC as if
	 * U-Boot had set it.
	 * If we can't get the root key, generate a random one,
	 * 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally
	 * assigned bit set, and the broadcast/multicast bit clear.
*/ val0 = EMAC_READ_REG(sc, EMAC_MAC_A0); val1 = EMAC_READ_REG(sc, EMAC_MAC_A1); if ((val0 | val1) != 0 && (val0 | val1) != 0xffffff) { hwaddr[0] = (val1 >> 16) & 0xff; hwaddr[1] = (val1 >> 8) & 0xff; hwaddr[2] = (val1 >> 0) & 0xff; hwaddr[3] = (val0 >> 16) & 0xff; hwaddr[4] = (val0 >> 8) & 0xff; hwaddr[5] = (val0 >> 0) & 0xff; } else { rootkey_size = sizeof(rootkey); if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey, &rootkey_size) == 0) { hwaddr[0] = 0x2; hwaddr[1] = rootkey[3]; hwaddr[2] = rootkey[12]; hwaddr[3] = rootkey[13]; hwaddr[4] = rootkey[14]; hwaddr[5] = rootkey[15]; } else { rnd = arc4random() & 0x00ffffff; hwaddr[0] = 'b'; hwaddr[1] = 's'; hwaddr[2] = 'd'; hwaddr[3] = (rnd >> 16) & 0xff; hwaddr[4] = (rnd >> 8) & 0xff; hwaddr[5] = (rnd >> 0) & 0xff; } } if (bootverbose) printf("MAC address: %s\n", ether_sprintf(hwaddr)); } static u_int emac_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t h, *hashes = arg; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; hashes[h >> 5] |= 1 << (h & 0x1f); return (1); } static void emac_set_rx_mode(struct emac_softc *sc) { if_t ifp; uint32_t hashes[2]; uint32_t rcr = 0; EMAC_ASSERT_LOCKED(sc); ifp = sc->emac_ifp; rcr = EMAC_READ_REG(sc, EMAC_RX_CTL); /* Unicast packet and DA filtering */ rcr |= EMAC_RX_UCAD; rcr |= EMAC_RX_DAF; hashes[0] = 0; hashes[1] = 0; if (if_getflags(ifp) & IFF_ALLMULTI) { hashes[0] = 0xffffffff; hashes[1] = 0xffffffff; } else if_foreach_llmaddr(ifp, emac_hash_maddr, hashes); rcr |= EMAC_RX_MCO; rcr |= EMAC_RX_MHF; EMAC_WRITE_REG(sc, EMAC_RX_HASH0, hashes[0]); EMAC_WRITE_REG(sc, EMAC_RX_HASH1, hashes[1]); if (if_getflags(ifp) & IFF_BROADCAST) { rcr |= EMAC_RX_BCO; rcr |= EMAC_RX_MCO; } if (if_getflags(ifp) & IFF_PROMISC) rcr |= EMAC_RX_PA; else rcr |= EMAC_RX_UCAD; EMAC_WRITE_REG(sc, EMAC_RX_CTL, rcr); } static void emac_reset(struct emac_softc *sc) { EMAC_WRITE_REG(sc, EMAC_CTL, 0); DELAY(200); EMAC_WRITE_REG(sc, EMAC_CTL, 1); DELAY(200); } static void 
emac_drain_rxfifo(struct emac_softc *sc)
{

	/* Discard pending RX FIFO words until the frame counter drains. */
	while (EMAC_READ_REG(sc, EMAC_RX_FBC) > 0)
		(void)EMAC_READ_REG(sc, EMAC_RX_IO_DATA);
}

/*
 * TX completion: clear the finished FIFO bits out of emac_fifo_mask,
 * count one OPACKET per completed FIFO (both may complete at once),
 * clear OACTIVE and disarm the watchdog timer.
 */
static void
emac_txeof(struct emac_softc *sc, uint32_t status)
{
	if_t ifp;

	EMAC_ASSERT_LOCKED(sc);
	ifp = sc->emac_ifp;
	status &= (EMAC_TX_FIFO0 | EMAC_TX_FIFO1);
	sc->emac_fifo_mask &= ~status;
	if (status == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1))
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 2);
	else
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Unarm watchdog timer if no TX */
	sc->emac_watchdog_timer = 0;
}

/*
 * RX path: pull up to "count" frames out of the EMAC RX FIFO through the
 * EMAC_RX_IO_DATA register window and feed them to the network stack.
 */
static void
emac_rxeof(struct emac_softc *sc, int count)
{
	if_t ifp;
	struct mbuf *m, *m0;
	uint32_t reg_val, rxcount;
	int16_t len;
	uint16_t status;
	int i;

	ifp = sc->emac_ifp;
	for (; count > 0 &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; count--) {
		/*
		 * Race warning: The first packet might arrive with
		 * the interrupts disabled, but the second will fix
		 */
		rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC);
		if (!rxcount) {
			/* Had one stuck?
*/ rxcount = EMAC_READ_REG(sc, EMAC_RX_FBC); if (!rxcount) return; } /* Check packet header */ reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA); if (reg_val != EMAC_PACKET_HEADER) { /* Packet header is wrong */ if (bootverbose) if_printf(ifp, "wrong packet header\n"); /* Disable RX */ reg_val = EMAC_READ_REG(sc, EMAC_CTL); reg_val &= ~EMAC_CTL_RX_EN; EMAC_WRITE_REG(sc, EMAC_CTL, reg_val); /* Flush RX FIFO */ reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL); reg_val |= EMAC_RX_FLUSH_FIFO; EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val); for (i = 100; i > 0; i--) { DELAY(100); if ((EMAC_READ_REG(sc, EMAC_RX_CTL) & EMAC_RX_FLUSH_FIFO) == 0) break; } if (i == 0) { device_printf(sc->emac_dev, "flush FIFO timeout\n"); /* Reinitialize controller */ emac_init_locked(sc); return; } /* Enable RX */ reg_val = EMAC_READ_REG(sc, EMAC_CTL); reg_val |= EMAC_CTL_RX_EN; EMAC_WRITE_REG(sc, EMAC_CTL, reg_val); return; } /* Get packet size and status */ reg_val = EMAC_READ_REG(sc, EMAC_RX_IO_DATA); len = reg_val & 0xffff; status = (reg_val >> 16) & 0xffff; if (len < 64 || (status & EMAC_PKT_OK) == 0) { if (bootverbose) if_printf(ifp, "bad packet: len = %i status = %i\n", len, status); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); emac_drain_rxfifo(sc); continue; } #if 0 if (status & (EMAC_CRCERR | EMAC_LENERR)) { good_packet = 0; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if (status & EMAC_CRCERR) if_printf(ifp, "crc error\n"); if (status & EMAC_LENERR) if_printf(ifp, "length error\n"); } #endif m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { emac_drain_rxfifo(sc); return; } m->m_len = m->m_pkthdr.len = MCLBYTES; /* Copy entire frame to mbuf first. 
*/ bus_space_read_multi_4(sc->emac_tag, sc->emac_handle, EMAC_RX_IO_DATA, mtod(m, uint32_t *), roundup2(len, 4) / 4); m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len - ETHER_CRC_LEN; /* * Emac controller needs strict alignment, so to avoid * copying over an entire frame to align, we allocate * a new mbuf and copy ethernet header + IP header to * the new mbuf. The new mbuf is prepended into the * existing mbuf chain. */ if (m->m_len <= (MHLEN - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; } else if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN) && m->m_len > (MHLEN - ETHER_HDR_LEN)) { MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 != NULL) { len = ETHER_HDR_LEN + m->m_pkthdr.l2hlen; bcopy(m->m_data, m0->m_data, len); m->m_data += len; m->m_len -= len; m0->m_len = len; M_MOVE_PKTHDR(m0, m); m0->m_next = m; m = m0; } else { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); m = NULL; continue; } } else if (m->m_len > EMAC_MAC_MAXF) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); m = NULL; continue; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); EMAC_UNLOCK(sc); if_input(ifp, m); EMAC_LOCK(sc); } } static void emac_watchdog(struct emac_softc *sc) { if_t ifp; EMAC_ASSERT_LOCKED(sc); if (sc->emac_watchdog_timer == 0 || --sc->emac_watchdog_timer) return; ifp = sc->emac_ifp; if (sc->emac_link == 0) { if (bootverbose) if_printf(sc->emac_ifp, "watchdog timeout " "(missed link)\n"); } else if_printf(sc->emac_ifp, "watchdog timeout -- resetting\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); emac_init_locked(sc); if (!if_sendq_empty(ifp)) emac_start_locked(ifp); } static void emac_tick(void *arg) { struct emac_softc *sc; struct mii_data *mii; sc = (struct emac_softc *)arg; mii = device_get_softc(sc->emac_miibus); mii_tick(mii); emac_watchdog(sc); callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc); } static void emac_init(void *xcs) { struct emac_softc *sc; sc = (struct 
emac_softc *)xcs; EMAC_LOCK(sc); emac_init_locked(sc); EMAC_UNLOCK(sc); } static void emac_init_locked(struct emac_softc *sc) { if_t ifp; struct mii_data *mii; uint32_t reg_val; uint8_t *eaddr; EMAC_ASSERT_LOCKED(sc); ifp = sc->emac_ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; /* Flush RX FIFO */ reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL); reg_val |= EMAC_RX_FLUSH_FIFO; EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val); DELAY(1); /* Soft reset MAC */ reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0); reg_val &= (~EMAC_MAC_CTL0_SOFT_RST); EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val); /* Set MII clock */ reg_val = EMAC_READ_REG(sc, EMAC_MAC_MCFG); reg_val &= (~(0xf << 2)); reg_val |= (0xd << 2); EMAC_WRITE_REG(sc, EMAC_MAC_MCFG, reg_val); /* Clear RX counter */ EMAC_WRITE_REG(sc, EMAC_RX_FBC, 0); /* Disable all interrupt and clear interrupt status */ EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0); reg_val = EMAC_READ_REG(sc, EMAC_INT_STA); EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val); DELAY(1); /* Set up TX */ reg_val = EMAC_READ_REG(sc, EMAC_TX_MODE); reg_val |= EMAC_TX_AB_M; reg_val &= EMAC_TX_TM; EMAC_WRITE_REG(sc, EMAC_TX_MODE, reg_val); /* Set up RX */ reg_val = EMAC_READ_REG(sc, EMAC_RX_CTL); reg_val |= EMAC_RX_SETUP; reg_val &= EMAC_RX_TM; EMAC_WRITE_REG(sc, EMAC_RX_CTL, reg_val); /* Set up MAC CTL0. */ reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL0); reg_val |= EMAC_MAC_CTL0_SETUP; EMAC_WRITE_REG(sc, EMAC_MAC_CTL0, reg_val); /* Set up MAC CTL1. 
*/ reg_val = EMAC_READ_REG(sc, EMAC_MAC_CTL1); reg_val |= EMAC_MAC_CTL1_SETUP; EMAC_WRITE_REG(sc, EMAC_MAC_CTL1, reg_val); /* Set up IPGT */ EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, EMAC_MAC_IPGT_FD); /* Set up IPGR */ EMAC_WRITE_REG(sc, EMAC_MAC_IPGR, EMAC_MAC_NBTB_IPG2 | (EMAC_MAC_NBTB_IPG1 << 8)); /* Set up Collison window */ EMAC_WRITE_REG(sc, EMAC_MAC_CLRT, EMAC_MAC_RM | (EMAC_MAC_CW << 8)); /* Set up Max Frame Length */ EMAC_WRITE_REG(sc, EMAC_MAC_MAXF, EMAC_MAC_MFL); /* Setup ethernet address */ eaddr = if_getlladdr(ifp); EMAC_WRITE_REG(sc, EMAC_MAC_A1, eaddr[0] << 16 | eaddr[1] << 8 | eaddr[2]); EMAC_WRITE_REG(sc, EMAC_MAC_A0, eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); /* Setup rx filter */ emac_set_rx_mode(sc); /* Enable RX/TX0/RX Hlevel interrupt */ reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL); reg_val |= EMAC_INT_EN; EMAC_WRITE_REG(sc, EMAC_INT_CTL, reg_val); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); sc->emac_link = 0; /* Switch to the current media. */ mii = device_get_softc(sc->emac_miibus); mii_mediachg(mii); callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc); } static void emac_start(if_t ifp) { struct emac_softc *sc; sc = if_getsoftc(ifp); EMAC_LOCK(sc); emac_start_locked(ifp); EMAC_UNLOCK(sc); } static void emac_start_locked(if_t ifp) { struct emac_softc *sc; struct mbuf *m, *m0; uint32_t fifo, reg; sc = if_getsoftc(ifp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) return; if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1)) return; if (sc->emac_link == 0) return; m = if_dequeue(ifp); if (m == NULL) return; /* Select channel */ if (sc->emac_fifo_mask & EMAC_TX_FIFO0) fifo = 1; else fifo = 0; sc->emac_fifo_mask |= (1 << fifo); if (sc->emac_fifo_mask == (EMAC_TX_FIFO0 | EMAC_TX_FIFO1)) if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); EMAC_WRITE_REG(sc, EMAC_TX_INS, fifo); /* * Emac controller wants 4 byte aligned TX buffers. * We have to copy pretty much all the time. 
 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0) {
		m0 = m_defrag(m, M_NOWAIT);
		if (m0 == NULL) {
			m_freem(m);
			m = NULL;
			return;
		}
		m = m0;
	}
	/* Write data */
	bus_space_write_multi_4(sc->emac_tag, sc->emac_handle,
	    EMAC_TX_IO_DATA, mtod(m, uint32_t *),
	    roundup2(m->m_len, 4) / 4);

	/* Send the data length. */
	reg = (fifo == 0) ? EMAC_TX_PL0 : EMAC_TX_PL1;
	EMAC_WRITE_REG(sc, reg, m->m_len);

	/* Start the transfer from the FIFO out to the PHY. */
	reg = (fifo == 0) ? EMAC_TX_CTL0 : EMAC_TX_CTL1;
	EMAC_WRITE_REG(sc, reg, EMAC_READ_REG(sc, reg) | 1);

	/* Set timeout */
	sc->emac_watchdog_timer = 5;

	/* Data have been sent to hardware, it is okay to free the mbuf now. */
	BPF_MTAP(ifp, m);
	m_freem(m);
}

/*
 * Stop the MAC: mark the interface down, mask and ack all interrupts,
 * disable RX/TX and cancel the periodic tick callout.  Caller must hold
 * the softc lock.
 */
static void
emac_stop_locked(struct emac_softc *sc)
{
	if_t ifp;
	uint32_t reg_val;

	EMAC_ASSERT_LOCKED(sc);

	ifp = sc->emac_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->emac_link = 0;

	/* Disable all interrupt and clear interrupt status */
	EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
	reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
	EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);

	/* Disable RX/TX */
	reg_val = EMAC_READ_REG(sc, EMAC_CTL);
	reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
	EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);

	callout_stop(&sc->emac_tick_ch);
}

/*
 * Interrupt handler: mask interrupts, read and ack the pending status
 * bits, run the RX/TX completion paths as indicated, then re-enable the
 * interrupt mask.
 */
static void
emac_intr(void *arg)
{
	struct emac_softc *sc;
	if_t ifp;
	uint32_t reg_val;

	sc = (struct emac_softc *)arg;
	EMAC_LOCK(sc);

	/* Disable all interrupts */
	EMAC_WRITE_REG(sc, EMAC_INT_CTL, 0);
	/* Get EMAC interrupt status */
	reg_val = EMAC_READ_REG(sc, EMAC_INT_STA);
	/* Clear ISR status (write-to-clear) */
	EMAC_WRITE_REG(sc, EMAC_INT_STA, reg_val);

	/* Received incoming packet */
	if (reg_val & EMAC_INT_STA_RX)
		emac_rxeof(sc, sc->emac_rx_process_limit);

	/* Transmit Interrupt check */
	if (reg_val & EMAC_INT_STA_TX) {
		emac_txeof(sc, reg_val);
		ifp = sc->emac_ifp;
		if (!if_sendq_empty(ifp))
			emac_start_locked(ifp);
	}

	/* Re-enable interrupt mask */
	reg_val = EMAC_READ_REG(sc, EMAC_INT_CTL);
	reg_val |= EMAC_INT_EN;
	EMAC_WRITE_REG(sc,
EMAC_INT_CTL, reg_val); EMAC_UNLOCK(sc); } static int emac_ioctl(if_t ifp, u_long command, caddr_t data) { struct emac_softc *sc; struct mii_data *mii; struct ifreq *ifr; int error = 0; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (command) { case SIOCSIFFLAGS: EMAC_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if ((if_getflags(ifp) ^ sc->emac_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) emac_set_rx_mode(sc); } else emac_init_locked(sc); } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) emac_stop_locked(sc); } sc->emac_if_flags = if_getflags(ifp); EMAC_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: EMAC_LOCK(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { emac_set_rx_mode(sc); } EMAC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->emac_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int emac_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-emac")) return (ENXIO); device_set_desc(dev, "A10/A20 EMAC ethernet controller"); return (BUS_PROBE_DEFAULT); } static int emac_detach(device_t dev) { struct emac_softc *sc; sc = device_get_softc(dev); if_setdrvflagbits(sc->emac_ifp, 0, IFF_DRV_RUNNING); if (device_is_attached(dev)) { ether_ifdetach(sc->emac_ifp); EMAC_LOCK(sc); emac_stop_locked(sc); EMAC_UNLOCK(sc); callout_drain(&sc->emac_tick_ch); } if (sc->emac_intrhand != NULL) bus_teardown_intr(sc->emac_dev, sc->emac_irq, sc->emac_intrhand); if (sc->emac_miibus != NULL) { device_delete_child(sc->emac_dev, sc->emac_miibus); bus_generic_detach(sc->emac_dev); } if (sc->emac_clk != NULL) clk_disable(sc->emac_clk); if (sc->emac_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->emac_res); if (sc->emac_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->emac_irq); if 
(sc->emac_ifp != NULL) if_free(sc->emac_ifp); if (mtx_initialized(&sc->emac_mtx)) mtx_destroy(&sc->emac_mtx); return (0); } static int emac_shutdown(device_t dev) { return (emac_suspend(dev)); } static int emac_suspend(device_t dev) { struct emac_softc *sc; if_t ifp; sc = device_get_softc(dev); EMAC_LOCK(sc); ifp = sc->emac_ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) emac_stop_locked(sc); EMAC_UNLOCK(sc); return (0); } static int emac_resume(device_t dev) { struct emac_softc *sc; if_t ifp; sc = device_get_softc(dev); EMAC_LOCK(sc); ifp = sc->emac_ifp; if ((if_getflags(ifp) & IFF_UP) != 0) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); emac_init_locked(sc); } EMAC_UNLOCK(sc); return (0); } static int emac_attach(device_t dev) { struct emac_softc *sc; if_t ifp; int error, rid; uint8_t eaddr[ETHER_ADDR_LEN]; sc = device_get_softc(dev); sc->emac_dev = dev; error = 0; mtx_init(&sc->emac_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->emac_tick_ch, &sc->emac_mtx, 0); rid = 0; sc->emac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->emac_res == NULL) { device_printf(dev, "unable to map memory\n"); error = ENXIO; goto fail; } sc->emac_tag = rman_get_bustag(sc->emac_res); sc->emac_handle = rman_get_bushandle(sc->emac_res); rid = 0; sc->emac_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->emac_irq == NULL) { device_printf(dev, "cannot allocate IRQ resources.\n"); error = ENXIO; goto fail; } /* Create device sysctl node. 
*/ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->emac_rx_process_limit, 0, sysctl_hw_emac_proc_limit, "I", "max number of Rx events to process"); sc->emac_rx_process_limit = EMAC_PROC_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "process_limit", &sc->emac_rx_process_limit); if (error == 0) { if (sc->emac_rx_process_limit < EMAC_PROC_MIN || sc->emac_rx_process_limit > EMAC_PROC_MAX) { device_printf(dev, "process_limit value out of range; " "using default: %d\n", EMAC_PROC_DEFAULT); sc->emac_rx_process_limit = EMAC_PROC_DEFAULT; } } /* Setup EMAC */ error = emac_sys_setup(sc); if (error != 0) goto fail; emac_reset(sc); ifp = sc->emac_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "unable to allocate ifp\n"); error = ENOSPC; goto fail; } if_setsoftc(ifp, sc); /* Setup MII */ error = mii_attach(dev, &sc->emac_miibus, ifp, emac_ifmedia_upd, emac_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "PHY probe failed\n"); goto fail; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(ifp, emac_start); if_setioctlfn(ifp, emac_ioctl); if_setinitfn(ifp, emac_init); if_setsendqlen(ifp, IFQ_MAXLEN); /* Get MAC address */ emac_get_hwaddr(sc, eaddr); ether_ifattach(ifp, eaddr); /* VLAN capability setup. */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if_setcapenable(ifp, if_getcapabilities(ifp)); /* Tell the upper layer we support VLAN over-sized frames. 
*/ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); error = bus_setup_intr(dev, sc->emac_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, emac_intr, sc, &sc->emac_intrhand); if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); ether_ifdetach(ifp); goto fail; } fail: if (error != 0) emac_detach(dev); return (error); } static bool emac_miibus_iowait(struct emac_softc *sc) { uint32_t timeout; for (timeout = 100; timeout != 0; --timeout) { DELAY(100); if ((EMAC_READ_REG(sc, EMAC_MAC_MIND) & 0x1) == 0) return (true); } return (false); } /* * The MII bus interface */ static int emac_miibus_readreg(device_t dev, int phy, int reg) { struct emac_softc *sc; int rval; sc = device_get_softc(dev); /* Issue phy address and reg */ EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg); /* Pull up the phy io line */ EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1); if (!emac_miibus_iowait(sc)) { device_printf(dev, "timeout waiting for mii read\n"); return (0); } /* Push down the phy io line */ EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0); /* Read data */ rval = EMAC_READ_REG(sc, EMAC_MAC_MRDD); return (rval); } static int emac_miibus_writereg(device_t dev, int phy, int reg, int data) { struct emac_softc *sc; sc = device_get_softc(dev); /* Issue phy address and reg */ EMAC_WRITE_REG(sc, EMAC_MAC_MADR, (phy << 8) | reg); /* Write data */ EMAC_WRITE_REG(sc, EMAC_MAC_MWTD, data); /* Pull up the phy io line */ EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x1); if (!emac_miibus_iowait(sc)) { device_printf(dev, "timeout waiting for mii write\n"); return (0); } /* Push down the phy io line */ EMAC_WRITE_REG(sc, EMAC_MAC_MCMD, 0x0); return (0); } static void emac_miibus_statchg(device_t dev) { struct emac_softc *sc; struct mii_data *mii; if_t ifp; uint32_t reg_val; sc = device_get_softc(dev); mii = device_get_softc(sc->emac_miibus); ifp = sc->emac_ifp; if (mii == NULL || ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; sc->emac_link = 0; if ((mii->mii_media_status & 
(IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) {
		/* Only 10/100 media count as link-up for this MAC. */
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->emac_link = 1;
			break;
		default:
			break;
		}
	}

	/* Program MACs with resolved speed/duplex. */
	if (sc->emac_link != 0) {
		reg_val = EMAC_READ_REG(sc, EMAC_MAC_IPGT);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			reg_val &= ~EMAC_MAC_IPGT_HD;
			reg_val |= EMAC_MAC_IPGT_FD;
		} else {
			reg_val &= ~EMAC_MAC_IPGT_FD;
			reg_val |= EMAC_MAC_IPGT_HD;
		}
		EMAC_WRITE_REG(sc, EMAC_MAC_IPGT, reg_val);
		/* Enable RX/TX */
		reg_val = EMAC_READ_REG(sc, EMAC_CTL);
		reg_val |= EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN;
		EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
	} else {
		/* Disable RX/TX */
		reg_val = EMAC_READ_REG(sc, EMAC_CTL);
		reg_val &= ~(EMAC_CTL_RST | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN);
		EMAC_WRITE_REG(sc, EMAC_CTL, reg_val);
	}
}

/*
 * ifmedia change handler: reset every PHY on the bus, then switch to the
 * currently selected media.  Returns the mii_mediachg() error code.
 */
static int
emac_ifmedia_upd(if_t ifp)
{
	struct emac_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->emac_miibus);
	EMAC_LOCK(sc);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	EMAC_UNLOCK(sc);

	return (error);
}

/*
 * ifmedia status handler: poll the PHY and report the active media and
 * link status to the caller's ifmediareq.
 */
static void
emac_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct emac_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->emac_miibus);
	EMAC_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EMAC_UNLOCK(sc);
}

static device_method_t emac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emac_probe),
	DEVMETHOD(device_attach,	emac_attach),
	DEVMETHOD(device_detach,	emac_detach),
	DEVMETHOD(device_shutdown,	emac_shutdown),
	DEVMETHOD(device_suspend,	emac_suspend),
	DEVMETHOD(device_resume,	emac_resume),

	/* bus interface, for miibus */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,
emac_miibus_readreg), DEVMETHOD(miibus_writereg, emac_miibus_writereg), DEVMETHOD(miibus_statchg, emac_miibus_statchg), DEVMETHOD_END }; static driver_t emac_driver = { "emac", emac_methods, sizeof(struct emac_softc) }; DRIVER_MODULE(emac, simplebus, emac_driver, 0, 0); DRIVER_MODULE(miibus, emac, miibus_driver, 0, 0); MODULE_DEPEND(emac, miibus, 1, 1, 1); MODULE_DEPEND(emac, ether, 1, 1, 1); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_emac_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, EMAC_PROC_MIN, EMAC_PROC_MAX)); } diff --git a/sys/arm/freescale/imx/imx_gpio.c b/sys/arm/freescale/imx/imx_gpio.c index d3d01d3cb857..c5e92992a36b 100644 --- a/sys/arm/freescale/imx/imx_gpio.c +++ b/sys/arm/freescale/imx/imx_gpio.c @@ -1,955 +1,955 @@ /*- * Copyright (c) 2012, 2013 The FreeBSD Foundation * * This software was developed by Oleksandr Rybalko under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Freescale i.MX515 GPIO driver. */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #define IMX_ENABLE_CLOCKS #endif #ifdef IMX_ENABLE_CLOCKS -#include +#include #endif #include "gpio_if.h" #ifdef INTRNG #include "pic_if.h" #endif #define WRITE4(_sc, _r, _v) \ bus_space_write_4((_sc)->sc_iot, (_sc)->sc_ioh, (_r), (_v)) #define READ4(_sc, _r) \ bus_space_read_4((_sc)->sc_iot, (_sc)->sc_ioh, (_r)) #define SET4(_sc, _r, _m) \ WRITE4((_sc), (_r), READ4((_sc), (_r)) | (_m)) #define CLEAR4(_sc, _r, _m) \ WRITE4((_sc), (_r), READ4((_sc), (_r)) & ~(_m)) /* Registers definition for Freescale i.MX515 GPIO controller */ #define IMX_GPIO_DR_REG 0x000 /* Pin Data */ #define IMX_GPIO_OE_REG 0x004 /* Set Pin Output */ #define IMX_GPIO_PSR_REG 0x008 /* Pad Status */ #define IMX_GPIO_ICR1_REG 0x00C /* Interrupt Configuration */ #define IMX_GPIO_ICR2_REG 0x010 /* Interrupt Configuration */ #define GPIO_ICR_COND_LOW 0 #define GPIO_ICR_COND_HIGH 1 #define GPIO_ICR_COND_RISE 2 #define GPIO_ICR_COND_FALL 3 #define GPIO_ICR_COND_MASK 0x3 #define IMX_GPIO_IMR_REG 0x014 /* Interrupt Mask Register */ #define IMX_GPIO_ISR_REG 0x018 /* Interrupt Status Register */ #define IMX_GPIO_EDGE_REG 0x01C /* Edge Detect Register */ #ifdef INTRNG #define DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_INTR_LEVEL_LOW | 
GPIO_INTR_LEVEL_HIGH | GPIO_INTR_EDGE_RISING | \ GPIO_INTR_EDGE_FALLING | GPIO_INTR_EDGE_BOTH) #else #define DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT) #endif #define NGPIO 32 #ifdef INTRNG struct gpio_irqsrc { struct intr_irqsrc gi_isrc; u_int gi_irq; uint32_t gi_mode; }; #endif struct imx51_gpio_softc { device_t dev; device_t sc_busdev; struct mtx sc_mtx; struct resource *sc_res[3]; /* 1 x mem, 2 x IRQ */ void *gpio_ih[2]; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; int gpio_npins; struct gpio_pin gpio_pins[NGPIO]; #ifdef INTRNG struct gpio_irqsrc gpio_pic_irqsrc[NGPIO]; #endif #ifdef IMX_ENABLE_CLOCKS clk_t clk; #endif }; static struct ofw_compat_data compat_data[] = { {"fsl,imx8mq-gpio", 1}, {"fsl,imx6q-gpio", 1}, {"fsl,imx53-gpio", 1}, {"fsl,imx51-gpio", 1}, {NULL, 0} }; static struct resource_spec imx_gpio_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0 } }; #define FIRST_IRQRES 1 #define NUM_IRQRES 2 /* * Helpers */ static void imx51_gpio_pin_configure(struct imx51_gpio_softc *, struct gpio_pin *, uint32_t); /* * Driver stuff */ static int imx51_gpio_probe(device_t); static int imx51_gpio_attach(device_t); static int imx51_gpio_detach(device_t); /* * GPIO interface */ static device_t imx51_gpio_get_bus(device_t); static int imx51_gpio_pin_max(device_t, int *); static int imx51_gpio_pin_getcaps(device_t, uint32_t, uint32_t *); static int imx51_gpio_pin_getflags(device_t, uint32_t, uint32_t *); static int imx51_gpio_pin_getname(device_t, uint32_t, char *); static int imx51_gpio_pin_setflags(device_t, uint32_t, uint32_t); static int imx51_gpio_pin_set(device_t, uint32_t, unsigned int); static int imx51_gpio_pin_get(device_t, uint32_t, unsigned int *); static int imx51_gpio_pin_toggle(device_t, uint32_t pin); #ifdef INTRNG static int gpio_pic_map_fdt(struct imx51_gpio_softc *sc, struct intr_map_data_fdt *daf, u_int *irqp, uint32_t *modep) { u_int irq; uint32_t mode; /* * From 
devicetree/bindings/gpio/fsl-imx-gpio.txt: * #interrupt-cells: 2. The first cell is the GPIO number. The second * cell bits[3:0] is used to specify trigger type and level flags: * 1 = low-to-high edge triggered. * 2 = high-to-low edge triggered. * 4 = active high level-sensitive. * 8 = active low level-sensitive. * We can do any single one of these modes, and also edge low+high * (i.e., trigger on both edges); other combinations are not supported. */ if (daf->ncells != 2) { device_printf(sc->dev, "Invalid #interrupt-cells\n"); return (EINVAL); } irq = daf->cells[0]; if (irq >= sc->gpio_npins) { device_printf(sc->dev, "Invalid interrupt number %u\n", irq); return (EINVAL); } switch (daf->cells[1]) { case 1: mode = GPIO_INTR_EDGE_RISING; break; case 2: mode = GPIO_INTR_EDGE_FALLING; break; case 3: mode = GPIO_INTR_EDGE_BOTH; break; case 4: mode = GPIO_INTR_LEVEL_HIGH; break; case 8: mode = GPIO_INTR_LEVEL_LOW; break; default: device_printf(sc->dev, "Unsupported interrupt mode 0x%2x\n", daf->cells[1]); return (ENOTSUP); } *irqp = irq; if (modep != NULL) *modep = mode; return (0); } static int gpio_pic_map_gpio(struct imx51_gpio_softc *sc, struct intr_map_data_gpio *dag, u_int *irqp, uint32_t *modep) { u_int irq; irq = dag->gpio_pin_num; if (irq >= sc->gpio_npins) { device_printf(sc->dev, "Invalid interrupt number %u\n", irq); return (EINVAL); } switch (dag->gpio_intr_mode) { case GPIO_INTR_LEVEL_LOW: case GPIO_INTR_LEVEL_HIGH: case GPIO_INTR_EDGE_RISING: case GPIO_INTR_EDGE_FALLING: case GPIO_INTR_EDGE_BOTH: break; default: device_printf(sc->dev, "Unsupported interrupt mode 0x%8x\n", dag->gpio_intr_mode); return (EINVAL); } *irqp = irq; if (modep != NULL) *modep = dag->gpio_intr_mode; return (0); } static int gpio_pic_map(struct imx51_gpio_softc *sc, struct intr_map_data *data, u_int *irqp, uint32_t *modep) { switch (data->type) { case INTR_MAP_DATA_FDT: return (gpio_pic_map_fdt(sc, (struct intr_map_data_fdt *)data, irqp, modep)); case INTR_MAP_DATA_GPIO: return 
(gpio_pic_map_gpio(sc, (struct intr_map_data_gpio *)data, irqp, modep)); default: return (ENOTSUP); } } static int gpio_pic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { int error; u_int irq; struct imx51_gpio_softc *sc; sc = device_get_softc(dev); error = gpio_pic_map(sc, data, &irq, NULL); if (error == 0) *isrcp = &sc->gpio_pic_irqsrc[irq].gi_isrc; return (error); } static int gpio_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct imx51_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); if (isrc->isrc_handlers == 0) { gi = (struct gpio_irqsrc *)isrc; gi->gi_mode = GPIO_INTR_CONFORM; // XXX Not sure this is necessary mtx_lock_spin(&sc->sc_mtx); CLEAR4(sc, IMX_GPIO_IMR_REG, (1U << gi->gi_irq)); WRITE4(sc, IMX_GPIO_ISR_REG, (1U << gi->gi_irq)); mtx_unlock_spin(&sc->sc_mtx); } return (0); } static int gpio_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct imx51_gpio_softc *sc; struct gpio_irqsrc *gi; int error; u_int icfg, irq, reg, shift, wrk; uint32_t mode; if (data == NULL) return (ENOTSUP); sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; /* Get config for interrupt. */ error = gpio_pic_map(sc, data, &irq, &mode); if (error != 0) return (error); if (gi->gi_irq != irq) return (EINVAL); /* Compare config if this is not first setup. */ if (isrc->isrc_handlers != 0) return (gi->gi_mode == mode ? 0 : EINVAL); gi->gi_mode = mode; /* * To interrupt on both edges we have to use the EDGE register. The * manual says it only exists for backwards compatibilty with older imx * chips, but it's also the only way to configure interrupting on both * edges. If the EDGE bit is on, the corresponding ICRn bit is ignored. 
 */
	mtx_lock_spin(&sc->sc_mtx);
	if (mode == GPIO_INTR_EDGE_BOTH) {
		SET4(sc, IMX_GPIO_EDGE_REG, (1u << irq));
	} else {
		CLEAR4(sc, IMX_GPIO_EDGE_REG, (1u << irq));
		/* Translate the generic mode to the hardware ICR condition. */
		switch (mode) {
		default:
			/* silence warnings; default can't actually happen. */
			/* FALLTHROUGH */
		case GPIO_INTR_LEVEL_LOW:
			icfg = GPIO_ICR_COND_LOW;
			break;
		case GPIO_INTR_LEVEL_HIGH:
			icfg = GPIO_ICR_COND_HIGH;
			break;
		case GPIO_INTR_EDGE_RISING:
			icfg = GPIO_ICR_COND_RISE;
			break;
		case GPIO_INTR_EDGE_FALLING:
			icfg = GPIO_ICR_COND_FALL;
			break;
		}
		/* Two config bits per pin: pins 0-15 in ICR1, 16-31 in ICR2. */
		if (irq < 16) {
			reg = IMX_GPIO_ICR1_REG;
			shift = 2 * irq;
		} else {
			reg = IMX_GPIO_ICR2_REG;
			shift = 2 * (irq - 16);
		}
		wrk = READ4(sc, reg);
		wrk &= ~(GPIO_ICR_COND_MASK << shift);
		wrk |= icfg << shift;
		WRITE4(sc, reg, wrk);
	}
	/* Ack any latched status, then unmask this pin's interrupt. */
	WRITE4(sc, IMX_GPIO_ISR_REG, (1u << irq));
	SET4(sc, IMX_GPIO_IMR_REG, (1u << irq));
	mtx_unlock_spin(&sc->sc_mtx);

	return (0);
}

/*
 * pic_disable_intr method (i.e., mask_intr): mask this pin's interrupt
 * by clearing its bit in IMR.
 */
static void
gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct imx51_gpio_softc *sc;
	u_int irq;

	sc = device_get_softc(dev);
	irq = ((struct gpio_irqsrc *)isrc)->gi_irq;
	mtx_lock_spin(&sc->sc_mtx);
	CLEAR4(sc, IMX_GPIO_IMR_REG, (1U << irq));
	mtx_unlock_spin(&sc->sc_mtx);
}

/*
 * pic_enable_intr method (i.e., unmask_intr): unmask this pin's interrupt
 * by setting its bit in IMR.
 */
static void
gpio_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct imx51_gpio_softc *sc;
	u_int irq;

	sc = device_get_softc(dev);
	irq = ((struct gpio_irqsrc *)isrc)->gi_irq;
	mtx_lock_spin(&sc->sc_mtx);
	SET4(sc, IMX_GPIO_IMR_REG, (1U << irq));
	mtx_unlock_spin(&sc->sc_mtx);
}

static void
gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct imx51_gpio_softc *sc;
	u_int irq;

	sc = device_get_softc(dev);
	irq = ((struct gpio_irqsrc *)isrc)->gi_irq;
	arm_irq_memory_barrier(0);
	/* EOI. W1C reg so no r-m-w, no locking needed.
 */
	WRITE4(sc, IMX_GPIO_ISR_REG, (1U << irq));
}

/*
 * pic_post_ithread method: ack (EOI) this pin's status bit and unmask it
 * again after the ithread has run.
 */
static void
gpio_pic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct imx51_gpio_softc *sc;
	u_int irq;

	sc = device_get_softc(dev);
	irq = ((struct gpio_irqsrc *)isrc)->gi_irq;
	arm_irq_memory_barrier(0);
	/* EOI. W1C reg so no r-m-w, no locking needed. */
	WRITE4(sc, IMX_GPIO_ISR_REG, (1U << irq));
	gpio_pic_enable_intr(dev, isrc);
}

/*
 * pic_pre_ithread method: mask this pin's interrupt while its ithread runs.
 */
static void
gpio_pic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	gpio_pic_disable_intr(dev, isrc);
}

/*
 * Interrupt filter for the controller: walk the pending-and-unmasked
 * status bits and dispatch each one; a pin with no registered handler is
 * masked and acked so it cannot storm.
 */
static int
gpio_pic_filter(void *arg)
{
	struct imx51_gpio_softc *sc;
	struct intr_irqsrc *isrc;
	uint32_t i, interrupts;

	sc = arg;
	mtx_lock_spin(&sc->sc_mtx);
	interrupts = READ4(sc, IMX_GPIO_ISR_REG) & READ4(sc, IMX_GPIO_IMR_REG);
	mtx_unlock_spin(&sc->sc_mtx);

	for (i = 0; interrupts != 0; i++, interrupts >>= 1) {
		if ((interrupts & 0x1) == 0)
			continue;
		isrc = &sc->gpio_pic_irqsrc[i].gi_isrc;
		if (intr_isrc_dispatch(isrc, curthread->td_intr_frame) != 0) {
			gpio_pic_disable_intr(sc->dev, isrc);
			gpio_pic_post_filter(sc->dev, isrc);
			device_printf(sc->dev, "Stray irq %u disabled\n", i);
		}
	}

	return (FILTER_HANDLED);
}

/*
 * Initialize our isrcs and register them with intrng.
*/ static int gpio_pic_register_isrcs(struct imx51_gpio_softc *sc) { int error; uint32_t irq; const char *name; name = device_get_nameunit(sc->dev); for (irq = 0; irq < NGPIO; irq++) { sc->gpio_pic_irqsrc[irq].gi_irq = irq; sc->gpio_pic_irqsrc[irq].gi_mode = GPIO_INTR_CONFORM; error = intr_isrc_register(&sc->gpio_pic_irqsrc[irq].gi_isrc, sc->dev, 0, "%s,%u", name, irq); if (error != 0) { /* XXX call intr_isrc_deregister() */ device_printf(sc->dev, "%s failed", __func__); return (error); } } return (0); } #endif /* * */ static void imx51_gpio_pin_configure(struct imx51_gpio_softc *sc, struct gpio_pin *pin, unsigned int flags) { u_int newflags, pad; mtx_lock_spin(&sc->sc_mtx); /* * Manage input/output; other flags not supported yet (maybe not ever, * since we have no connection to the pad config registers from here). * * When setting a pin to output, honor the PRESET_[LOW,HIGH] flags if * present. Otherwise, for glitchless transitions on pins with pulls, * read the current state of the pad and preset the DR register to drive * the current value onto the pin before enabling the pin for output. * * Note that changes to pin->gp_flags must be acccumulated in newflags * and stored with a single writeback to gp_flags at the end, to enable * unlocked reads of that value elsewhere. This is only about unlocked * access to gp_flags from elsewhere; we still use locking in this * function to protect r-m-w access to the hardware registers. 
*/ if (flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) { newflags = pin->gp_flags & ~(GPIO_PIN_INPUT | GPIO_PIN_OUTPUT); if (flags & GPIO_PIN_OUTPUT) { if (flags & GPIO_PIN_PRESET_LOW) { pad = 0; } else if (flags & GPIO_PIN_PRESET_HIGH) { pad = 1; } else { if (flags & GPIO_PIN_OPENDRAIN) pad = READ4(sc, IMX_GPIO_PSR_REG); else pad = READ4(sc, IMX_GPIO_DR_REG); pad = (pad >> pin->gp_pin) & 1; } newflags |= GPIO_PIN_OUTPUT; SET4(sc, IMX_GPIO_DR_REG, (pad << pin->gp_pin)); SET4(sc, IMX_GPIO_OE_REG, (1U << pin->gp_pin)); } else { newflags |= GPIO_PIN_INPUT; CLEAR4(sc, IMX_GPIO_OE_REG, (1U << pin->gp_pin)); } pin->gp_flags = newflags; } mtx_unlock_spin(&sc->sc_mtx); } static device_t imx51_gpio_get_bus(device_t dev) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int imx51_gpio_pin_max(device_t dev, int *maxpin) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); *maxpin = sc->gpio_npins - 1; return (0); } static int imx51_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->gpio_npins) return (EINVAL); *caps = sc->gpio_pins[pin].gp_caps; return (0); } static int imx51_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->gpio_npins) return (EINVAL); *flags = sc->gpio_pins[pin].gp_flags; return (0); } static int imx51_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->gpio_npins) return (EINVAL); mtx_lock_spin(&sc->sc_mtx); memcpy(name, sc->gpio_pins[pin].gp_name, GPIOMAXNAME); mtx_unlock_spin(&sc->sc_mtx); return (0); } static int imx51_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->gpio_npins) return (EINVAL); imx51_gpio_pin_configure(sc, &sc->gpio_pins[pin], flags); return (0); } static int 
imx51_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
{
	struct imx51_gpio_softc *sc;

	sc = device_get_softc(dev);
	/* NOTE(review): pin is unsigned, gpio_npins is int; assumes
	 * gpio_npins is non-negative (it is set to NGPIO at attach). */
	if (pin >= sc->gpio_npins)
		return (EINVAL);
	mtx_lock_spin(&sc->sc_mtx);
	if (value)
		SET4(sc, IMX_GPIO_DR_REG, (1U << pin));
	else
		CLEAR4(sc, IMX_GPIO_DR_REG, (1U << pin));
	mtx_unlock_spin(&sc->sc_mtx);

	return (0);
}

/*
 * Read the current logical value of a pin into *val (0 or 1).
 */
static int
imx51_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
{
	struct imx51_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= sc->gpio_npins)
		return (EINVAL);
	/*
	 * Normally a pin set for output can be read by reading the DR reg which
	 * indicates what value is being driven to that pin. The exception is
	 * pins configured for open-drain mode, in which case we have to read
	 * the pad status register in case the pin is being driven externally.
	 * Doing so requires that the SION bit be configured in pinmux, which
	 * isn't the case for most normal gpio pins, so only try to read via PSR
	 * if the OPENDRAIN flag is set, and it's the user's job to correctly
	 * configure SION along with open-drain output mode for those pins.
*/ if (sc->gpio_pins[pin].gp_flags & GPIO_PIN_OPENDRAIN) *val = (READ4(sc, IMX_GPIO_PSR_REG) >> pin) & 1; else *val = (READ4(sc, IMX_GPIO_DR_REG) >> pin) & 1; return (0); } static int imx51_gpio_pin_toggle(device_t dev, uint32_t pin) { struct imx51_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->gpio_npins) return (EINVAL); mtx_lock_spin(&sc->sc_mtx); WRITE4(sc, IMX_GPIO_DR_REG, (READ4(sc, IMX_GPIO_DR_REG) ^ (1U << pin))); mtx_unlock_spin(&sc->sc_mtx); return (0); } static int imx51_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins) { struct imx51_gpio_softc *sc; if (first_pin != 0) return (EINVAL); sc = device_get_softc(dev); if (orig_pins != NULL) *orig_pins = READ4(sc, IMX_GPIO_DR_REG); if ((clear_pins | change_pins) != 0) { mtx_lock_spin(&sc->sc_mtx); WRITE4(sc, IMX_GPIO_DR_REG, (READ4(sc, IMX_GPIO_DR_REG) & ~clear_pins) ^ change_pins); mtx_unlock_spin(&sc->sc_mtx); } return (0); } static int imx51_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins, uint32_t *pin_flags) { struct imx51_gpio_softc *sc; u_int i; uint32_t bit, drclr, drset, flags, oeclr, oeset, pads; sc = device_get_softc(dev); if (first_pin != 0 || num_pins > sc->gpio_npins) return (EINVAL); drclr = drset = oeclr = oeset = 0; pads = READ4(sc, IMX_GPIO_DR_REG); for (i = 0; i < num_pins; ++i) { bit = 1u << i; flags = pin_flags[i]; if (flags & GPIO_PIN_INPUT) { oeclr |= bit; } else if (flags & GPIO_PIN_OUTPUT) { oeset |= bit; if (flags & GPIO_PIN_PRESET_LOW) drclr |= bit; else if (flags & GPIO_PIN_PRESET_HIGH) drset |= bit; else /* Drive whatever it's now pulled to. 
*/ drset |= pads & bit; } } mtx_lock_spin(&sc->sc_mtx); WRITE4(sc, IMX_GPIO_DR_REG, (READ4(sc, IMX_GPIO_DR_REG) & ~drclr) | drset); WRITE4(sc, IMX_GPIO_OE_REG, (READ4(sc, IMX_GPIO_OE_REG) & ~oeclr) | oeset); mtx_unlock_spin(&sc->sc_mtx); return (0); } static int imx51_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "Freescale i.MX GPIO Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int imx51_gpio_attach(device_t dev) { struct imx51_gpio_softc *sc; int i, irq, unit; #ifdef IMX_ENABLE_CLOCKS int err; #endif sc = device_get_softc(dev); sc->dev = dev; sc->gpio_npins = NGPIO; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->dev), NULL, MTX_SPIN); #ifdef IMX_ENABLE_CLOCKS if (clk_get_by_ofw_index(sc->dev, 0, 0, &sc->clk) != 0) { device_printf(dev, "could not get clock"); return (ENOENT); } err = clk_enable(sc->clk); if (err != 0) { device_printf(sc->dev, "could not enable ipg clock\n"); return (err); } #endif if (bus_alloc_resources(dev, imx_gpio_spec, sc->sc_res)) { device_printf(dev, "could not allocate resources\n"); bus_release_resources(dev, imx_gpio_spec, sc->sc_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); } sc->sc_iot = rman_get_bustag(sc->sc_res[0]); sc->sc_ioh = rman_get_bushandle(sc->sc_res[0]); /* * Mask off all interrupts in hardware, then set up interrupt handling. */ WRITE4(sc, IMX_GPIO_IMR_REG, 0); for (irq = 0; irq < 2; irq++) { #ifdef INTRNG if ((bus_setup_intr(dev, sc->sc_res[1 + irq], INTR_TYPE_CLK, gpio_pic_filter, NULL, sc, &sc->gpio_ih[irq]))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); imx51_gpio_detach(dev); return (ENXIO); } #endif } unit = device_get_unit(dev); for (i = 0; i < sc->gpio_npins; i++) { sc->gpio_pins[i].gp_pin = i; sc->gpio_pins[i].gp_caps = DEFAULT_CAPS; sc->gpio_pins[i].gp_flags = (READ4(sc, IMX_GPIO_OE_REG) & (1U << i)) ? 
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT; snprintf(sc->gpio_pins[i].gp_name, GPIOMAXNAME, "GPIO%d_IO%02d", unit + 1, i); } #ifdef INTRNG gpio_pic_register_isrcs(sc); intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev))); #endif sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) { imx51_gpio_detach(dev); return (ENXIO); } return (0); } static int imx51_gpio_detach(device_t dev) { int irq; struct imx51_gpio_softc *sc; #ifdef IMX_ENABLE_CLOCKS int error; #endif sc = device_get_softc(dev); #ifdef IMX_ENABLE_CLOCKS error = clk_disable(sc->clk); if (error != 0) { device_printf(sc->dev, "could not disable ipg clock\n"); return (error); } #endif gpiobus_detach_bus(dev); for (irq = 0; irq < NUM_IRQRES; irq++) { if (sc->gpio_ih[irq]) bus_teardown_intr(dev, sc->sc_res[irq + FIRST_IRQRES], sc->gpio_ih[irq]); } bus_release_resources(dev, imx_gpio_spec, sc->sc_res); mtx_destroy(&sc->sc_mtx); return(0); } static phandle_t imx51_gpio_get_node(device_t bus, device_t dev) { /* * Share controller node with gpiobus device */ return ofw_bus_get_node(bus); } static device_method_t imx51_gpio_methods[] = { DEVMETHOD(device_probe, imx51_gpio_probe), DEVMETHOD(device_attach, imx51_gpio_attach), DEVMETHOD(device_detach, imx51_gpio_detach), #ifdef INTRNG /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gpio_pic_disable_intr), DEVMETHOD(pic_enable_intr, gpio_pic_enable_intr), DEVMETHOD(pic_map_intr, gpio_pic_map_intr), DEVMETHOD(pic_setup_intr, gpio_pic_setup_intr), DEVMETHOD(pic_teardown_intr, gpio_pic_teardown_intr), DEVMETHOD(pic_post_filter, gpio_pic_post_filter), DEVMETHOD(pic_post_ithread, gpio_pic_post_ithread), DEVMETHOD(pic_pre_ithread, gpio_pic_pre_ithread), #endif /* OFW methods */ DEVMETHOD(ofw_bus_get_node, imx51_gpio_get_node), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, imx51_gpio_get_bus), DEVMETHOD(gpio_pin_max, imx51_gpio_pin_max), DEVMETHOD(gpio_pin_getname, imx51_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, imx51_gpio_pin_getflags), 
DEVMETHOD(gpio_pin_getcaps, imx51_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, imx51_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, imx51_gpio_pin_get), DEVMETHOD(gpio_pin_set, imx51_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, imx51_gpio_pin_toggle), DEVMETHOD(gpio_pin_access_32, imx51_gpio_pin_access_32), DEVMETHOD(gpio_pin_config_32, imx51_gpio_pin_config_32), {0, 0}, }; static driver_t imx51_gpio_driver = { "gpio", imx51_gpio_methods, sizeof(struct imx51_gpio_softc), }; EARLY_DRIVER_MODULE(imx51_gpio, simplebus, imx51_gpio_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); diff --git a/sys/arm/freescale/imx/imx_i2c.c b/sys/arm/freescale/imx/imx_i2c.c index 7014ea4ea7e3..f8769d29e7f7 100644 --- a/sys/arm/freescale/imx/imx_i2c.c +++ b/sys/arm/freescale/imx/imx_i2c.c @@ -1,728 +1,728 @@ /*- * Copyright (C) 2008-2009 Semihalf, Michal Hajduk * Copyright (c) 2012, 2013 The FreeBSD Foundation * Copyright (c) 2015 Ian Lepore * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * I2C driver for Freescale i.MX hardware. * * Note that the hardware is capable of running as both a master and a slave. * This driver currently implements only master-mode operations. * * This driver supports multi-master i2c buses, by detecting bus arbitration * loss and returning IIC_EBUSBSY status. Notably, it does not do any kind of * retries if some other master jumps onto the bus and interrupts one of our * transfer cycles resulting in arbitration loss in mid-transfer. The caller * must handle retries in a way that makes sense for the slave being addressed. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include #include #include #include #include #if defined(__aarch64__) #define IMX_ENABLE_CLOCKS #endif #ifdef IMX_ENABLE_CLOCKS -#include +#include #endif #define I2C_ADDR_REG 0x00 /* I2C slave address register */ #define I2C_FDR_REG 0x04 /* I2C frequency divider register */ #define I2C_CONTROL_REG 0x08 /* I2C control register */ #define I2C_STATUS_REG 0x0C /* I2C status register */ #define I2C_DATA_REG 0x10 /* I2C data register */ #define I2C_DFSRR_REG 0x14 /* I2C Digital Filter Sampling rate */ #define I2CCR_MEN (1 << 7) /* Module enable */ #define I2CCR_MSTA (1 << 5) /* Master/slave mode */ #define I2CCR_MTX (1 << 4) /* Transmit/receive mode */ #define I2CCR_TXAK (1 << 3) /* Transfer acknowledge */ #define I2CCR_RSTA (1 << 2) /* Repeated START */ #define I2CSR_MCF (1 << 7) /* Data transfer */ #define I2CSR_MASS (1 << 6) /* Addressed as a slave */ #define I2CSR_MBB (1 << 5) /* Bus busy */ #define I2CSR_MAL (1 << 4) /* Arbitration lost */ #define I2CSR_SRW (1 << 2) /* Slave read/write */ #define I2CSR_MIF (1 << 1) /* Module interrupt */ #define I2CSR_RXAK (1 << 0) /* Received acknowledge */ #define I2C_BAUD_RATE_FAST 0x31 #define I2C_BAUD_RATE_DEF 0x3F #define I2C_DFSSR_DIV 0x10 /* * A table of available divisors and the associated coded values to put in the * FDR register to achieve that divisor.. There is no algorithmic relationship I * can see between divisors and the codes that go into the register. The table * begins and ends with entries that handle insane configuration values. 
*/ struct clkdiv { u_int divisor; u_int regcode; }; static struct clkdiv clkdiv_table[] = { { 0, 0x20 }, { 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 }, { 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 }, { 56, 0x29 }, { 60, 0x06 }, { 64, 0x2a }, { 72, 0x2b }, { 80, 0x2c }, { 88, 0x09 }, { 96, 0x2d }, { 104, 0x0a }, { 112, 0x2e }, { 128, 0x2f }, { 144, 0x0c }, { 160, 0x30 }, { 192, 0x31 }, { 224, 0x32 }, { 240, 0x0f }, { 256, 0x33 }, { 288, 0x10 }, { 320, 0x34 }, { 384, 0x35 }, { 448, 0x36 }, { 480, 0x13 }, { 512, 0x37 }, { 576, 0x14 }, { 640, 0x38 }, { 768, 0x39 }, { 896, 0x3a }, { 960, 0x17 }, { 1024, 0x3b }, { 1152, 0x18 }, { 1280, 0x3c }, { 1536, 0x3d }, { 1792, 0x3e }, { 1920, 0x1b }, { 2048, 0x3f }, { 2304, 0x1c }, { 2560, 0x1d }, { 3072, 0x1e }, { 3840, 0x1f }, {UINT_MAX, 0x1f} }; static struct ofw_compat_data compat_data[] = { {"fsl,imx21-i2c", 1}, {"fsl,imx6q-i2c", 1}, {"fsl,imx-i2c", 1}, {NULL, 0} }; struct i2c_softc { device_t dev; device_t iicbus; struct resource *res; int rid; sbintime_t byte_time_sbt; int rb_pinctl_idx; gpio_pin_t rb_sclpin; gpio_pin_t rb_sdapin; u_int debug; u_int slave; #ifdef IMX_ENABLE_CLOCKS clk_t ipgclk; #endif }; #define DEVICE_DEBUGF(sc, lvl, fmt, args...) \ if ((lvl) <= (sc)->debug) \ device_printf((sc)->dev, fmt, ##args) #define DEBUGF(sc, lvl, fmt, args...) 
\ if ((lvl) <= (sc)->debug) \ printf(fmt, ##args) static phandle_t i2c_get_node(device_t, device_t); static int i2c_probe(device_t); static int i2c_attach(device_t); static int i2c_detach(device_t); static int i2c_repeated_start(device_t, u_char, int); static int i2c_start(device_t, u_char, int); static int i2c_stop(device_t); static int i2c_reset(device_t, u_char, u_char, u_char *); static int i2c_read(device_t, char *, int, int *, int, int); static int i2c_write(device_t, const char *, int, int *, int); static device_method_t i2c_methods[] = { DEVMETHOD(device_probe, i2c_probe), DEVMETHOD(device_attach, i2c_attach), DEVMETHOD(device_detach, i2c_detach), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, i2c_get_node), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, i2c_repeated_start), DEVMETHOD(iicbus_start, i2c_start), DEVMETHOD(iicbus_stop, i2c_stop), DEVMETHOD(iicbus_reset, i2c_reset), DEVMETHOD(iicbus_read, i2c_read), DEVMETHOD(iicbus_write, i2c_write), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), DEVMETHOD_END }; static driver_t i2c_driver = { "imx_i2c", i2c_methods, sizeof(struct i2c_softc), }; DRIVER_MODULE(imx_i2c, simplebus, i2c_driver, 0, 0); DRIVER_MODULE(ofw_iicbus, imx_i2c, ofw_iicbus_driver, 0, 0); MODULE_DEPEND(imx_i2c, iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); static phandle_t i2c_get_node(device_t bus, device_t dev) { /* * Share controller node with iicbus device */ return ofw_bus_get_node(bus); } static __inline void i2c_write_reg(struct i2c_softc *sc, bus_size_t off, uint8_t val) { bus_write_1(sc->res, off, val); } static __inline uint8_t i2c_read_reg(struct i2c_softc *sc, bus_size_t off) { return (bus_read_1(sc->res, off)); } static __inline void i2c_flag_set(struct i2c_softc *sc, bus_size_t off, uint8_t mask) { uint8_t status; status = i2c_read_reg(sc, off); status |= mask; i2c_write_reg(sc, off, status); } /* Wait for bus to become busy or not-busy. 
*/ static int wait_for_busbusy(struct i2c_softc *sc, int wantbusy) { int retry, srb; retry = 1000; while (retry --) { srb = i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB; if ((srb && wantbusy) || (!srb && !wantbusy)) return (IIC_NOERR); DELAY(1); } return (IIC_ETIMEOUT); } /* Wait for transfer to complete, optionally check RXAK. */ static int wait_for_xfer(struct i2c_softc *sc, int checkack) { int retry, sr; /* * Sleep for about the time it takes to transfer a byte (with precision * set to tolerate 5% oversleep). We calculate the approximate byte * transfer time when we set the bus speed divisor. Slaves are allowed * to do clock-stretching so the actual transfer time can be larger, but * this gets the bulk of the waiting out of the way without tying up the * processor the whole time. */ pause_sbt("imxi2c", sc->byte_time_sbt, sc->byte_time_sbt / 20, 0); retry = 10000; while (retry --) { sr = i2c_read_reg(sc, I2C_STATUS_REG); if (sr & I2CSR_MIF) { if (sr & I2CSR_MAL) return (IIC_EBUSERR); else if (checkack && (sr & I2CSR_RXAK)) return (IIC_ENOACK); else return (IIC_NOERR); } DELAY(1); } return (IIC_ETIMEOUT); } /* * Implement the error handling shown in the state diagram of the imx6 reference * manual. If there was an error, then: * - Clear master mode (MSTA and MTX). * - Wait for the bus to become free or for a timeout to happen. * - Disable the controller. 
*/ static int i2c_error_handler(struct i2c_softc *sc, int error) { if (error != 0) { i2c_write_reg(sc, I2C_STATUS_REG, 0); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); wait_for_busbusy(sc, false); i2c_write_reg(sc, I2C_CONTROL_REG, 0); } return (error); } static int i2c_recover_getsda(void *ctx) { bool active; gpio_pin_is_active(((struct i2c_softc *)ctx)->rb_sdapin, &active); return (active); } static void i2c_recover_setsda(void *ctx, int value) { gpio_pin_set_active(((struct i2c_softc *)ctx)->rb_sdapin, value); } static int i2c_recover_getscl(void *ctx) { bool active; gpio_pin_is_active(((struct i2c_softc *)ctx)->rb_sclpin, &active); return (active); } static void i2c_recover_setscl(void *ctx, int value) { gpio_pin_set_active(((struct i2c_softc *)ctx)->rb_sclpin, value); } static int i2c_recover_bus(struct i2c_softc *sc) { struct iicrb_pin_access pins; int err; /* * If we have gpio pinmux config, reconfigure the pins to gpio mode, * invoke iic_recover_bus which checks for a hung bus and bitbangs a * recovery sequence if necessary, then configure the pins back to i2c * mode (idx 0). 
*/ if (sc->rb_pinctl_idx == 0) return (0); fdt_pinctrl_configure(sc->dev, sc->rb_pinctl_idx); pins.ctx = sc; pins.getsda = i2c_recover_getsda; pins.setsda = i2c_recover_setsda; pins.getscl = i2c_recover_getscl; pins.setscl = i2c_recover_setscl; err = iic_recover_bus(&pins); fdt_pinctrl_configure(sc->dev, 0); return (err); } static int i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Freescale i.MX I2C"); return (BUS_PROBE_DEFAULT); } static int i2c_attach(device_t dev) { char wrkstr[16]; struct i2c_softc *sc; phandle_t node; int err, cfgidx; sc = device_get_softc(dev); sc->dev = dev; sc->rid = 0; #ifdef IMX_ENABLE_CLOCKS if (clk_get_by_ofw_index(sc->dev, 0, 0, &sc->ipgclk) != 0) { device_printf(dev, "could not get ipg clock"); return (ENOENT); } err = clk_enable(sc->ipgclk); if (err != 0) { device_printf(sc->dev, "could not enable ipg clock\n"); return (err); } #endif sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate resources"); return (ENXIO); } sc->iicbus = device_add_child(dev, "iicbus", -1); if (sc->iicbus == NULL) { device_printf(dev, "could not add iicbus child"); return (ENXIO); } /* Set up debug-enable sysctl. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->debug, 0, "Enable debug; 1=reads/writes, 2=add starts/stops"); /* * Set up for bus recovery using gpio pins, if the pinctrl and gpio * properties are present. This is optional. If all the config data is * not in place, we just don't do gpio bitbang bus recovery. 
*/ node = ofw_bus_get_node(sc->dev); err = gpio_pin_get_by_ofw_property(dev, node, "scl-gpios", &sc->rb_sclpin); if (err != 0) goto no_recovery; err = gpio_pin_get_by_ofw_property(dev, node, "sda-gpios", &sc->rb_sdapin); if (err != 0) goto no_recovery; /* * Preset the gpio pins to output high (idle bus state). The signal * won't actually appear on the pins until the bus recovery code changes * the pinmux config from i2c to gpio. */ gpio_pin_setflags(sc->rb_sclpin, GPIO_PIN_OUTPUT); gpio_pin_setflags(sc->rb_sdapin, GPIO_PIN_OUTPUT); gpio_pin_set_active(sc->rb_sclpin, true); gpio_pin_set_active(sc->rb_sdapin, true); /* * Obtain the index of pinctrl node for bus recovery using gpio pins, * then confirm that pinctrl properties exist for that index and for the * default pinctrl-0. If sc->rb_pinctl_idx is non-zero, the reset code * will also do a bus recovery, so setting this value must be last. */ err = ofw_bus_find_string_index(node, "pinctrl-names", "gpio", &cfgidx); if (err == 0) { snprintf(wrkstr, sizeof(wrkstr), "pinctrl-%d", cfgidx); if (OF_hasprop(node, "pinctrl-0") && OF_hasprop(node, wrkstr)) sc->rb_pinctl_idx = cfgidx; } no_recovery: /* We don't do a hardware reset here because iicbus_attach() does it. */ /* Probe and attach the iicbus when interrupts are available. */ return (bus_delayed_attach_children(dev)); } static int i2c_detach(device_t dev) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); #ifdef IMX_ENABLE_CLOCKS error = clk_disable(sc->ipgclk); if (error != 0) { device_printf(sc->dev, "could not disable ipg clock\n"); return (error); } #endif if ((error = bus_generic_detach(sc->dev)) != 0) { device_printf(sc->dev, "cannot detach child devices\n"); return (error); } if (sc->iicbus != NULL) device_delete_child(dev, sc->iicbus); /* Release bus-recover pins; gpio_pin_release() handles NULL args. 
*/ gpio_pin_release(sc->rb_sclpin); gpio_pin_release(sc->rb_sdapin); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); return (0); } static int i2c_repeated_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); if ((i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB) == 0) { return (IIC_EBUSERR); } /* * Set repeated start condition, delay (per reference manual, min 156nS) * before writing slave address, wait for ack after write. */ i2c_flag_set(sc, I2C_CONTROL_REG, I2CCR_RSTA); DELAY(1); i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_DATA_REG, slave); sc->slave = slave; DEVICE_DEBUGF(sc, 2, "rstart 0x%02x\n", sc->slave); error = wait_for_xfer(sc, true); return (i2c_error_handler(sc, error)); } static int i2c_start_ll(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); DELAY(10); /* Delay for controller to sample bus state. */ if (i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB) { return (i2c_error_handler(sc, IIC_EBUSERR)); } i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_MTX); if ((error = wait_for_busbusy(sc, true)) != IIC_NOERR) return (i2c_error_handler(sc, error)); i2c_write_reg(sc, I2C_STATUS_REG, 0); i2c_write_reg(sc, I2C_DATA_REG, slave); sc->slave = slave; DEVICE_DEBUGF(sc, 2, "start 0x%02x\n", sc->slave); error = wait_for_xfer(sc, true); return (i2c_error_handler(sc, error)); } static int i2c_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); /* * Invoke the low-level code to put the bus into master mode and address * the given slave. If that fails, idle the controller and attempt a * bus recovery, and then try again one time. 
Signaling a start and * addressing the slave is the only operation that a low-level driver * can safely retry without any help from the upper layers that know * more about the slave device. */ if ((error = i2c_start_ll(dev, slave, timeout)) != 0) { i2c_write_reg(sc, I2C_CONTROL_REG, 0x0); if ((error = i2c_recover_bus(sc)) != 0) return (error); error = i2c_start_ll(dev, slave, timeout); } return (error); } static int i2c_stop(device_t dev) { struct i2c_softc *sc; sc = device_get_softc(dev); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); wait_for_busbusy(sc, false); i2c_write_reg(sc, I2C_CONTROL_REG, 0); DEVICE_DEBUGF(sc, 2, "stop 0x%02x\n", sc->slave); return (IIC_NOERR); } static int i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldadr) { struct i2c_softc *sc; u_int busfreq, div, i, ipgfreq; #ifdef IMX_ENABLE_CLOCKS int err; uint64_t freq; #endif sc = device_get_softc(dev); DEVICE_DEBUGF(sc, 1, "reset\n"); /* * Look up the divisor that gives the nearest speed that doesn't exceed * the configured value for the bus. */ #ifdef IMX_ENABLE_CLOCKS err = clk_get_freq(sc->ipgclk, &freq); if (err != 0) { device_printf(sc->dev, "cannot get frequency\n"); return (err); } ipgfreq = (int32_t)freq; #else ipgfreq = imx_ccm_ipg_hz(); #endif busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); div = howmany(ipgfreq, busfreq); for (i = 0; i < nitems(clkdiv_table); i++) { if (clkdiv_table[i].divisor >= div) break; } /* * Calculate roughly how long it will take to transfer a byte (which * requires 9 clock cycles) at the new bus speed. This value is used to * pause() while waiting for transfer-complete. With a 66MHz IPG clock * and the actual i2c bus speeds that leads to, for nominal 100KHz and * 400KHz bus speeds the transfer times are roughly 104uS and 22uS. */ busfreq = ipgfreq / clkdiv_table[i].divisor; sc->byte_time_sbt = SBT_1US * (9000000 / busfreq); /* * Disable the controller (do the reset), and set the new clock divisor. 
*/ i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_CONTROL_REG, 0x0); i2c_write_reg(sc, I2C_FDR_REG, (uint8_t)clkdiv_table[i].regcode); /* * Now that the controller is idle, perform bus recovery. If the bus * isn't hung, this a fairly fast no-op. */ return (i2c_recover_bus(sc)); } static int i2c_read(device_t dev, char *buf, int len, int *read, int last, int delay) { struct i2c_softc *sc; int error, reg; sc = device_get_softc(dev); *read = 0; DEVICE_DEBUGF(sc, 1, "read 0x%02x len %d: ", sc->slave, len); if (len) { if (len == 1) i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_TXAK); else i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA); /* Dummy read to prime the receiver. */ i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_read_reg(sc, I2C_DATA_REG); } error = 0; *read = 0; while (*read < len) { if ((error = wait_for_xfer(sc, false)) != IIC_NOERR) break; i2c_write_reg(sc, I2C_STATUS_REG, 0x0); if (last) { if (*read == len - 2) { /* NO ACK on last byte */ i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_TXAK); } else if (*read == len - 1) { /* Transfer done, signal stop. 
*/ i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_TXAK); wait_for_busbusy(sc, false); } } reg = i2c_read_reg(sc, I2C_DATA_REG); DEBUGF(sc, 1, "0x%02x ", reg); *buf++ = reg; (*read)++; } DEBUGF(sc, 1, "\n"); return (i2c_error_handler(sc, error)); } static int i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); error = 0; *sent = 0; DEVICE_DEBUGF(sc, 1, "write 0x%02x len %d: ", sc->slave, len); while (*sent < len) { DEBUGF(sc, 1, "0x%02x ", *buf); i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_DATA_REG, *buf++); if ((error = wait_for_xfer(sc, true)) != IIC_NOERR) break; (*sent)++; } DEBUGF(sc, 1, "\n"); return (i2c_error_handler(sc, error)); } diff --git a/sys/arm/freescale/vybrid/vf_i2c.c b/sys/arm/freescale/vybrid/vf_i2c.c index 38717c186814..cc0b32bb4759 100644 --- a/sys/arm/freescale/vybrid/vf_i2c.c +++ b/sys/arm/freescale/vybrid/vf_i2c.c @@ -1,610 +1,610 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Vybrid Family Inter-Integrated Circuit (I2C) * Chapter 48, Vybrid Reference Manual, Rev. 5, 07/2013 */ /* * This driver is based on the I2C driver for i.MX */ #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include #include #include -#include +#include #include #include #include #include #define I2C_IBAD 0x0 /* I2C Bus Address Register */ #define I2C_IBFD 0x1 /* I2C Bus Frequency Divider Register */ #define I2C_IBCR 0x2 /* I2C Bus Control Register */ #define IBCR_MDIS (1 << 7) /* Module disable. */ #define IBCR_IBIE (1 << 6) /* I-Bus Interrupt Enable. */ #define IBCR_MSSL (1 << 5) /* Master/Slave mode select. */ #define IBCR_TXRX (1 << 4) /* Transmit/Receive mode select. */ #define IBCR_NOACK (1 << 3) /* Data Acknowledge disable. */ #define IBCR_RSTA (1 << 2) /* Repeat Start. */ #define IBCR_DMAEN (1 << 1) /* DMA Enable. */ #define I2C_IBSR 0x3 /* I2C Bus Status Register */ #define IBSR_TCF (1 << 7) /* Transfer complete. */ #define IBSR_IAAS (1 << 6) /* Addressed as a slave. */ #define IBSR_IBB (1 << 5) /* Bus busy. */ #define IBSR_IBAL (1 << 4) /* Arbitration Lost. */ #define IBSR_SRW (1 << 2) /* Slave Read/Write. */ #define IBSR_IBIF (1 << 1) /* I-Bus Interrupt Flag. */ #define IBSR_RXAK (1 << 0) /* Received Acknowledge. 
*/ #define I2C_IBDR 0x4 /* I2C Bus Data I/O Register */ #define I2C_IBIC 0x5 /* I2C Bus Interrupt Config Register */ #define IBIC_BIIE (1 << 7) /* Bus Idle Interrupt Enable bit. */ #define I2C_IBDBG 0x6 /* I2C Bus Debug Register */ #ifdef DEBUG #define vf_i2c_dbg(_sc, fmt, args...) \ device_printf((_sc)->dev, fmt, ##args) #else #define vf_i2c_dbg(_sc, fmt, args...) #endif #define HW_UNKNOWN 0x00 #define HW_MVF600 0x01 #define HW_VF610 0x02 static int i2c_repeated_start(device_t, u_char, int); static int i2c_start(device_t, u_char, int); static int i2c_stop(device_t); static int i2c_reset(device_t, u_char, u_char, u_char *); static int i2c_read(device_t, char *, int, int *, int, int); static int i2c_write(device_t, const char *, int, int *, int); static phandle_t i2c_get_node(device_t, device_t); struct i2c_div_type { uint32_t reg_val; uint32_t div; }; struct i2c_softc { struct resource *res[2]; bus_space_tag_t bst; bus_space_handle_t bsh; clk_t clock; uint32_t freq; device_t dev; device_t iicbus; struct mtx mutex; uintptr_t hwtype; }; static struct resource_spec i2c_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static struct i2c_div_type vf610_div_table[] = { { 0x00, 20 }, { 0x01, 22 }, { 0x02, 24 }, { 0x03, 26 }, { 0x04, 28 }, { 0x05, 30 }, { 0x09, 32 }, { 0x06, 34 }, { 0x0A, 36 }, { 0x0B, 40 }, { 0x0C, 44 }, { 0x0D, 48 }, { 0x0E, 56 }, { 0x12, 64 }, { 0x13, 72 }, { 0x14, 80 }, { 0x15, 88 }, { 0x19, 96 }, { 0x16, 104 }, { 0x1A, 112 }, { 0x17, 128 }, { 0x1D, 160 }, { 0x1E, 192 }, { 0x22, 224 }, { 0x1F, 240 }, { 0x23, 256 }, { 0x24, 288 }, { 0x25, 320 }, { 0x26, 384 }, { 0x2A, 448 }, { 0x27, 480 }, { 0x2B, 512 }, { 0x2C, 576 }, { 0x2D, 640 }, { 0x2E, 768 }, { 0x32, 896 }, { 0x2F, 960 }, { 0x33, 1024 }, { 0x34, 1152 }, { 0x35, 1280 }, { 0x36, 1536 }, { 0x3A, 1792 }, { 0x37, 1920 }, { 0x3B, 2048 }, { 0x3C, 2304 }, { 0x3D, 2560 }, { 0x3E, 3072 }, { 0x3F, 3840 }, { 0x3F, 3840 }, { 0x7B, 4096 }, { 0x7D, 5120 }, { 0x7E, 6144 }, 
}; static const struct ofw_compat_data i2c_compat_data[] = { {"fsl,mvf600-i2c", HW_MVF600}, {"fsl,vf610-i2c", HW_VF610}, {NULL, HW_UNKNOWN} }; static int i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, i2c_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Vybrid Family Inter-Integrated Circuit (I2C)"); return (BUS_PROBE_DEFAULT); } static int i2c_attach(device_t dev) { struct i2c_softc *sc; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; sc->hwtype = ofw_bus_search_compatible(dev, i2c_compat_data)->ocd_data; node = ofw_bus_get_node(dev); error = clk_get_by_ofw_index(dev, node, 0, &sc->clock); if (error != 0) { sc->freq = 0; device_printf(dev, "Parent clock not found.\n"); } else { if (OF_hasprop(node, "clock-frequency")) OF_getencprop(node, "clock-frequency", &sc->freq, sizeof(sc->freq)); else sc->freq = 100000; } mtx_init(&sc->mutex, device_get_nameunit(dev), "I2C", MTX_DEF); error = bus_alloc_resources(dev, i2c_spec, sc->res); if (error != 0) { mtx_destroy(&sc->mutex); device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Memory interface */ sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); WRITE1(sc, I2C_IBIC, IBIC_BIIE); sc->iicbus = device_add_child(dev, "iicbus", -1); if (sc->iicbus == NULL) { device_printf(dev, "could not add iicbus child"); mtx_destroy(&sc->mutex); bus_release_resources(dev, i2c_spec, sc->res); return (ENXIO); } bus_generic_attach(dev); return (0); } static int i2c_detach(device_t dev) { struct i2c_softc *sc; int error = 0; sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error != 0) { device_printf(dev, "cannot detach child devices.\n"); return (error); } error = device_delete_child(dev, sc->iicbus); if (error != 0) { device_printf(dev, "could not delete iicbus child.\n"); return (error); } bus_release_resources(dev, i2c_spec, sc->res); mtx_destroy(&sc->mutex); return (0); } /* 
Wait for transfer interrupt flag */ static int wait_for_iif(struct i2c_softc *sc) { int retry; retry = 1000; while (retry --) { if (READ1(sc, I2C_IBSR) & IBSR_IBIF) { WRITE1(sc, I2C_IBSR, IBSR_IBIF); return (IIC_NOERR); } DELAY(10); } return (IIC_ETIMEOUT); } /* Wait for free bus */ static int wait_for_nibb(struct i2c_softc *sc) { int retry; retry = 1000; while (retry --) { if ((READ1(sc, I2C_IBSR) & IBSR_IBB) == 0) return (IIC_NOERR); DELAY(10); } return (IIC_ETIMEOUT); } /* Wait for transfer complete+interrupt flag */ static int wait_for_icf(struct i2c_softc *sc) { int retry; retry = 1000; while (retry --) { if (READ1(sc, I2C_IBSR) & IBSR_TCF) { if (READ1(sc, I2C_IBSR) & IBSR_IBIF) { WRITE1(sc, I2C_IBSR, IBSR_IBIF); return (IIC_NOERR); } } DELAY(10); } return (IIC_ETIMEOUT); } /* Get ACK bit from last write */ static bool tx_acked(struct i2c_softc *sc) { return (READ1(sc, I2C_IBSR) & IBSR_RXAK) ? false : true; } static int i2c_repeated_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; int reg; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c repeated start\n"); mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBAD, slave); if ((READ1(sc, I2C_IBSR) & IBSR_IBB) == 0) { mtx_unlock(&sc->mutex); return (IIC_EBUSERR); } /* Set repeated start condition */ DELAY(10); reg = READ1(sc, I2C_IBCR); reg |= (IBCR_RSTA | IBCR_IBIE); WRITE1(sc, I2C_IBCR, reg); DELAY(10); /* Write target address - LSB is R/W bit */ WRITE1(sc, I2C_IBDR, slave); error = wait_for_iif(sc); if (!tx_acked(sc)) { vf_i2c_dbg(sc, "cant i2c start: missing ACK after slave addres\n"); return (IIC_ENOACK); } mtx_unlock(&sc->mutex); if (error != 0) return (error); return (IIC_NOERR); } static int i2c_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; int reg; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c start\n"); mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBAD, slave); if (READ1(sc, I2C_IBSR) & IBSR_IBB) { mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "cant i2c start: 
IIC_EBUSBSY\n"); return (IIC_EBUSERR); } /* Set start condition */ reg = (IBCR_MSSL | IBCR_NOACK | IBCR_IBIE); WRITE1(sc, I2C_IBCR, reg); DELAY(100); reg |= (IBCR_TXRX); WRITE1(sc, I2C_IBCR, reg); /* Write target address - LSB is R/W bit */ WRITE1(sc, I2C_IBDR, slave); error = wait_for_iif(sc); if (error != 0) { mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "cant i2c start: iif error\n"); return (error); } mtx_unlock(&sc->mutex); if (!tx_acked(sc)) { vf_i2c_dbg(sc, "cant i2c start: missing QACK after slave addres\n"); return (IIC_ENOACK); } return (IIC_NOERR); } static int i2c_stop(device_t dev) { struct i2c_softc *sc; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c stop\n"); mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBCR, IBCR_NOACK | IBCR_IBIE); DELAY(100); /* Reset controller if bus still busy after STOP */ if (wait_for_nibb(sc) == IIC_ETIMEOUT) { WRITE1(sc, I2C_IBCR, IBCR_MDIS); DELAY(1000); WRITE1(sc, I2C_IBCR, IBCR_NOACK); } mtx_unlock(&sc->mutex); return (IIC_NOERR); } static uint32_t i2c_get_div_val(device_t dev) { struct i2c_softc *sc; uint64_t clk_freq; int error, i; sc = device_get_softc(dev); if (sc->hwtype == HW_MVF600) return 20; if (sc->freq == 0) return vf610_div_table[nitems(vf610_div_table) - 1].reg_val; error = clk_get_freq(sc->clock, &clk_freq); if (error != 0) { device_printf(dev, "Could not get parent clock frequency. 
" "Using default divider.\n"); return vf610_div_table[nitems(vf610_div_table) - 1].reg_val; } for (i = 0; i < nitems(vf610_div_table) - 1; i++) if ((clk_freq / vf610_div_table[i].div) <= sc->freq) break; return vf610_div_table[i].reg_val; } static int i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldadr) { struct i2c_softc *sc; uint32_t div; sc = device_get_softc(dev); div = i2c_get_div_val(dev); vf_i2c_dbg(sc, "Div val: %02x\n", div); vf_i2c_dbg(sc, "i2c reset\n"); switch (speed) { case IIC_FAST: case IIC_SLOW: case IIC_UNKNOWN: case IIC_FASTEST: default: break; } mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBCR, IBCR_MDIS); DELAY(1000); WRITE1(sc, I2C_IBFD, div); WRITE1(sc, I2C_IBCR, 0x0); /* Enable i2c */ DELAY(1000); mtx_unlock(&sc->mutex); return (IIC_NOERR); } static int i2c_read(device_t dev, char *buf, int len, int *read, int last, int delay) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c read\n"); *read = 0; mtx_lock(&sc->mutex); if (len) { if (len == 1) WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL | \ IBCR_NOACK); else WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL); /* dummy read */ READ1(sc, I2C_IBDR); DELAY(1000); } while (*read < len) { error = wait_for_icf(sc); if (error != 0) { mtx_unlock(&sc->mutex); return (error); } if ((*read == len - 2) && last) { /* NO ACK on last byte */ WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL | \ IBCR_NOACK); } if ((*read == len - 1) && last) { /* Transfer done, remove master bit */ WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_NOACK); } *buf++ = READ1(sc, I2C_IBDR); (*read)++; } mtx_unlock(&sc->mutex); return (IIC_NOERR); } static int i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c write\n"); *sent = 0; mtx_lock(&sc->mutex); while (*sent < len) { WRITE1(sc, I2C_IBDR, *buf++); error = wait_for_iif(sc); if (error != 0) { mtx_unlock(&sc->mutex); return (error); } if (!tx_acked(sc) && 
(*sent = (len - 2)) ){ mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "no ACK on %d write\n", *sent); return (IIC_ENOACK); } (*sent)++; } mtx_unlock(&sc->mutex); return (IIC_NOERR); } static phandle_t i2c_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static device_method_t i2c_methods[] = { DEVMETHOD(device_probe, i2c_probe), DEVMETHOD(device_attach, i2c_attach), DEVMETHOD(device_detach, i2c_detach), DEVMETHOD(ofw_bus_get_node, i2c_get_node), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, i2c_repeated_start), DEVMETHOD(iicbus_start, i2c_start), DEVMETHOD(iicbus_stop, i2c_stop), DEVMETHOD(iicbus_reset, i2c_reset), DEVMETHOD(iicbus_read, i2c_read), DEVMETHOD(iicbus_write, i2c_write), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), { 0, 0 } }; static DEFINE_CLASS_0(i2c, i2c_driver, i2c_methods, sizeof(struct i2c_softc)); DRIVER_MODULE(vybrid_i2c, simplebus, i2c_driver, 0, 0); DRIVER_MODULE(iicbus, i2c, iicbus_driver, 0, 0); DRIVER_MODULE(ofw_iicbus, i2c, ofw_iicbus_driver, 0, 0); diff --git a/sys/arm/mv/clk/a37x0_nb_periph_clk_driver.c b/sys/arm/mv/clk/a37x0_nb_periph_clk_driver.c index 423a42b80e06..b102f05d25c1 100644 --- a/sys/arm/mv/clk/a37x0_nb_periph_clk_driver.c +++ b/sys/arm/mv/clk/a37x0_nb_periph_clk_driver.c @@ -1,164 +1,164 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define NB_DEV_COUNT 17 static struct clk_div_table a37x0_periph_clk_table_6 [] = { { .value = 1, .divider = 1 }, { .value = 2, .divider = 2 }, { .value = 3, .divider = 3 }, { .value = 4, .divider = 4 }, { .value = 5, .divider = 5 }, { .value = 6, .divider = 6 }, { .value = 0, .divider = 0 } }; static struct clk_div_table a37x0_periph_clk_table_2 [] = { { .value = 0, .divider = 1 }, { .value = 1, .divider = 2 }, { .value = 2, .divider = 4 }, { .value = 3, .divider = 1 } }; static struct a37x0_periph_clknode_def a37x0_nb_devices [] = { CLK_FULL_DD("mmc", 0, 2, 0, 0, DIV_SEL2, DIV_SEL2, 16, 13, "tbg_mux_mmc_50", "div1_mmc_51", "div2_mmc_52", "clk_mux_mmc_53"), CLK_FULL_DD("sata_host", 1, 3, 2, 1, DIV_SEL2, DIV_SEL2, 10, 7, "tbg_sata_host_mmc_55", "div1_sata_host_56", "div2_sata_host_57", "clk_sata_host_mmc_58"), CLK_FULL_DD("sec_at", 2, 6, 4, 2, DIV_SEL1, DIV_SEL1, 3, 0, "tbg_mux_sec_at_60", "div1_sec_at_61", "div2_sec_at_62", "clk_mux_sec_at_63"), CLK_FULL_DD("sec_dap", 3, 7, 6, 3, DIV_SEL1, DIV_SEL1, 9, 6, "tbg_mux_sec_dap_65", "div1_sec_dap_67", 
"div2_sec_dap_68", "clk_mux_sec_dap_69"), CLK_FULL_DD("tsecm", 4, 8, 8, 4, DIV_SEL1, DIV_SEL1, 15, 12, "tbg_mux_tsecm_71", "div1_tsecm_72", "div2_tsecm_73", "clk_mux_tsecm_74"), CLK_FULL("setm_tmx", 5, 10, 10, 5, DIV_SEL1, 18, a37x0_periph_clk_table_6, "tbg_mux_setm_tmx_76", "div1_setm_tmx_77", "clk_mux_setm_tmx_78"), CLK_FIXED("avs", 6, 11, 6, "mux_avs_80", "fixed1_avs_82"), CLK_FULL_DD("pwm", 7, 13, 14, 8, DIV_SEL0, DIV_SEL0, 3, 0, "tbg_mux_pwm_83", "div1_pwm_84", "div2_pwm_85", "clk_mux_pwm_86"), CLK_FULL_DD("sqf", 8, 12, 12, 7, DIV_SEL1, DIV_SEL1, 27, 14, "tbg_mux_sqf_88", "div1_sqf_89", "div2_sqf_90", "clk_mux_sqf_91"), CLK_GATE("i2c_2", 9, 16, NULL), CLK_GATE("i2c_1", 10, 17, NULL), CLK_MUX_GATE_FIXED("ddr_phy", 11, 19, 10, "mux_ddr_phy_95", "gate_ddr_phy_96", "fixed1_ddr_phy_97"), CLK_FULL_DD("ddr_fclk", 12, 21, 16, 11, DIV_SEL0, DIV_SEL0, 15, 12, "tbg_mux_ddr_fclk_99", "div1_ddr_fclk_100", "div2_ddr_fclk_101", "clk_mux_ddr_fclk_102"), CLK_FULL("trace", 13, 22, 18, 12, DIV_SEL0, 20, a37x0_periph_clk_table_6, "tbg_mux_trace_104", "div1_trace_105", "clk_mux_trace_106"), CLK_FULL("counter", 14, 23, 20, 13, DIV_SEL0, 23, a37x0_periph_clk_table_6, "tbg_mux_counter_108", "div1_counter_109", "clk_mux_counter_110"), CLK_FULL_DD("eip97", 15, 26, 24, 9, DIV_SEL2, DIV_SEL2, 22, 19, "tbg_mux_eip97_112", "div1_eip97_113", "div2_eip97_114", "clk_mux_eip97_115"), CLK_CPU("cpu", 16, 22, 15, DIV_SEL0, 28, a37x0_periph_clk_table_2, "tbg_mux_cpu_117", "div1_cpu_118"), }; static struct ofw_compat_data a37x0_periph_compat_data [] = { { "marvell,armada-3700-periph-clock-nb", 1 }, { NULL, 0 } }; static int a37x0_nb_periph_clk_attach(device_t); static int a37x0_nb_periph_clk_probe(device_t); static device_method_t a37x0_nb_periph_clk_methods[] = { DEVMETHOD(clkdev_device_unlock, a37x0_periph_clk_device_unlock), DEVMETHOD(clkdev_device_lock, a37x0_periph_clk_device_lock), DEVMETHOD(clkdev_read_4, a37x0_periph_clk_read_4), DEVMETHOD(device_attach, a37x0_nb_periph_clk_attach), 
DEVMETHOD(device_detach, a37x0_periph_clk_detach), DEVMETHOD(device_probe, a37x0_nb_periph_clk_probe), DEVMETHOD_END }; static driver_t a37x0_nb_periph_driver = { "a37x0_nb_periph_driver", a37x0_nb_periph_clk_methods, sizeof(struct a37x0_periph_clk_softc) }; EARLY_DRIVER_MODULE(a37x0_nb_periph, simplebus, a37x0_nb_periph_driver, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_LATE); static int a37x0_nb_periph_clk_attach(device_t dev) { struct a37x0_periph_clk_softc *sc; sc = device_get_softc(dev); sc->devices = a37x0_nb_devices; sc->device_count = NB_DEV_COUNT; return (a37x0_periph_clk_attach(dev)); } static int a37x0_nb_periph_clk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, a37x0_periph_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "marvell,armada-3700-nb-periph-clock"); return (BUS_PROBE_DEFAULT); } diff --git a/sys/arm/mv/clk/a37x0_periph_clk_driver.c b/sys/arm/mv/clk/a37x0_periph_clk_driver.c index e8d90a0c9904..4456537ea785 100644 --- a/sys/arm/mv/clk/a37x0_periph_clk_driver.c +++ b/sys/arm/mv/clk/a37x0_periph_clk_driver.c @@ -1,213 +1,213 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define TBG_COUNT 4 #define XTAL_OFW_INDEX 4 static struct resource_spec a37x0_periph_clk_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; int a37x0_periph_clk_attach(device_t dev) { struct a37x0_periph_clknode_def *dev_defs; struct a37x0_periph_clk_softc *sc; const char *tbg_clocks[5]; const char *xtal_clock; phandle_t node; int error, i; clk_t clock; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, a37x0_periph_clk_spec, &sc->res) != 0) { device_printf(dev, "Cannot allocate resources\n"); return (ENXIO); } sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) { device_printf(dev, "Cannot create clock domain\n"); return (ENXIO); } for (i = 0; i < TBG_COUNT; i++){ error = clk_get_by_ofw_index(dev, node, i, &clock); if (error) goto fail; tbg_clocks[i] = clk_get_name(clock); } error = clk_get_by_ofw_index(dev, node, XTAL_OFW_INDEX, &clock); if (error) goto fail; xtal_clock = clk_get_name(clock); dev_defs = sc->devices; for (i = 0; i< sc->device_count; i++) { dev_defs[i].common_def.tbgs = tbg_clocks; dev_defs[i].common_def.xtal = xtal_clock; dev_defs[i].common_def.tbg_cnt = TBG_COUNT; switch (dev_defs[i].type) { case CLK_FULL_DD: error = 
a37x0_periph_d_register_full_clk_dd( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_FULL: error = a37x0_periph_d_register_full_clk( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_GATE: error = a37x0_periph_gate_register_gate( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_MUX_GATE: error = a37x0_periph_register_mux_gate( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_FIXED: error = a37x0_periph_fixed_register_fixed( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_CPU: error = a37x0_periph_d_register_periph_cpu( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_MDD: error = a37x0_periph_d_register_mdd( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; case CLK_MUX_GATE_FIXED: error = a37x0_periph_register_mux_gate_fixed( sc->clkdom, &dev_defs[i]); if (error) goto fail; break; default: return (ENXIO); } } error = clkdom_finit(sc->clkdom); if (error) goto fail; if (bootverbose) clkdom_dump(sc->clkdom); return (0); fail: bus_release_resources(dev, a37x0_periph_clk_spec, &sc->res); return (error); } int a37x0_periph_clk_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct a37x0_periph_clk_softc *sc; sc = device_get_softc(dev); *val = bus_read_4(sc->res, addr); return (0); } void a37x0_periph_clk_device_lock(device_t dev) { struct a37x0_periph_clk_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } void a37x0_periph_clk_device_unlock(device_t dev) { struct a37x0_periph_clk_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } int a37x0_periph_clk_detach(device_t dev) { return (EBUSY); } diff --git a/sys/arm/mv/clk/a37x0_sb_periph_clk_driver.c b/sys/arm/mv/clk/a37x0_sb_periph_clk_driver.c index 0429c8ae12f8..910525411c7f 100644 --- a/sys/arm/mv/clk/a37x0_sb_periph_clk_driver.c +++ b/sys/arm/mv/clk/a37x0_sb_periph_clk_driver.c @@ -1,133 +1,133 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define SB_DEV_COUNT 14 static struct a37x0_periph_clknode_def a37x0_sb_devices [] = { CLK_MDD("gbe_50", 0, 6, 1, DIV_SEL2, DIV_SEL2, 6, 9, "tbg_mux_gbe_50_120", "div1_gbe_50_121", "div2_gbe_50_122"), CLK_MDD("gbe_core", 1, 8, 5, DIV_SEL1, DIV_SEL1, 18, 21, "tbg_mux_gbe_core_124", "div1_gbe_core_125", "div2_gbe_core_126"), CLK_MDD("gbe_125", 2, 10, 3, DIV_SEL1, DIV_SEL1, 6, 9, "tbg_mux_gbe_125_128", "div1_gbe_50_129", "div2_gbe_50_130"), CLK_GATE("gbe1_50", 3, 0, "gbe_50"), CLK_GATE("gbe0_50", 4, 1, "gbe_50"), CLK_GATE("gbe1_125", 5, 2, "gbe_125"), CLK_GATE("gbe0_125", 6, 3, "gbe_125"), CLK_MUX_GATE("gbe1_core", 7, 4, 13, "gbe_core", "mux_gbe1_core_136", "fixed_gbe1_core_138"), CLK_MUX_GATE("gbe0_core", 8, 5, 14, "gbe_core", "mux_gbe0_core_139", "fixed_gbe0_core_141"), CLK_MUX_GATE("gbe_bm", 9, 12, 12, "gbe_core", "mux_gbe_bm_136", "fixed_gbe_bm_138"), CLK_FULL_DD("sdio", 10, 11, 14, 7, DIV_SEL0, DIV_SEL0, 3, 6, "tbg_mux_sdio_139", "div1_sdio_140", "div2_sdio_141", "clk_mux_sdio_142"), CLK_FULL_DD("usb32_usb2_sys", 11, 16, 16, 8, DIV_SEL0, DIV_SEL0, 9, 12, "tbg_mux_usb32_usb2_sys_144", "div1_usb32_usb2_sys_145", "div2_usb32_usb2_sys_146", "clk_mux_usb32_usb2_sys_147"), CLK_FULL_DD("usb32_ss_sys", 12, 17, 18, 9, DIV_SEL0, DIV_SEL0, 15, 18, "tbg_mux_usb32_ss_sys_149", "div1_usb32_ss_sys_150", "div2_usb32_ss_sys_151", "clk_mux_usb32_ss_sys_152"), CLK_GATE("pcie", 13, 14, "gbe_core") }; static struct ofw_compat_data a37x0_sb_periph_compat_data[] = { { "marvell,armada-3700-periph-clock-sb", 1 }, { NULL, 0 } }; static int a37x0_sb_periph_clk_attach(device_t); static int a37x0_sb_periph_clk_probe(device_t); static device_method_t a37x0_sb_periph_clk_methods[] = { DEVMETHOD(clkdev_device_unlock, a37x0_periph_clk_device_unlock), DEVMETHOD(clkdev_device_lock, a37x0_periph_clk_device_lock), 
DEVMETHOD(clkdev_read_4, a37x0_periph_clk_read_4), DEVMETHOD(device_attach, a37x0_sb_periph_clk_attach), DEVMETHOD(device_detach, a37x0_periph_clk_detach), DEVMETHOD(device_probe, a37x0_sb_periph_clk_probe), DEVMETHOD_END }; static driver_t a37x0_sb_periph_driver = { "a37x0_sb_periph_driver", a37x0_sb_periph_clk_methods, sizeof(struct a37x0_periph_clk_softc) }; EARLY_DRIVER_MODULE(a37x0_sb_periph, simplebus, a37x0_sb_periph_driver, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_LATE); static int a37x0_sb_periph_clk_attach(device_t dev) { struct a37x0_periph_clk_softc *sc; sc = device_get_softc(dev); sc->devices = a37x0_sb_devices; sc->device_count = SB_DEV_COUNT; return (a37x0_periph_clk_attach(dev)); } static int a37x0_sb_periph_clk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, a37x0_sb_periph_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "marvell,armada-3700-sb-periph-clock"); return (BUS_PROBE_DEFAULT); } diff --git a/sys/arm/mv/clk/a37x0_tbg.c b/sys/arm/mv/clk/a37x0_tbg.c index d8f0f5203455..32ec1c83b102 100644 --- a/sys/arm/mv/clk/a37x0_tbg.c +++ b/sys/arm/mv/clk/a37x0_tbg.c @@ -1,221 +1,221 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "a37x0_tbg_pll.h" #define NUM_TBG 4 #define TBG_CTRL0 0x4 #define TBG_CTRL1 0x8 #define TBG_CTRL7 0x20 #define TBG_CTRL8 0x30 #define TBG_MASK 0x1FF #define TBG_A_REFDIV 0 #define TBG_B_REFDIV 16 #define TBG_A_FBDIV 2 #define TBG_B_FBDIV 18 #define TBG_A_VCODIV_SEL 0 #define TBG_B_VCODIV_SEL 16 #define TBG_A_VCODIV_DIFF 1 #define TBG_B_VCODIV_DIFF 17 struct a37x0_tbg_softc { device_t dev; struct clkdom *clkdom; struct resource *res; }; static struct resource_spec a37x0_tbg_clk_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct a37x0_tbg_def { char *name; uint32_t refdiv_shift; uint32_t fbdiv_shift; uint32_t vcodiv_offset; uint32_t vcodiv_shift; uint32_t tbg_bypass_en; }; static const struct a37x0_tbg_def tbg[NUM_TBG] = { {"TBG-A-P", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL8, TBG_A_VCODIV_DIFF, 9}, {"TBG-B-P", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL8, TBG_B_VCODIV_DIFF, 25}, {"TBG-A-S", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL1, TBG_A_VCODIV_SEL, 9}, {"TBG-B-S", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL1, TBG_B_VCODIV_SEL, 25} }; static int a37x0_tbg_read_4(device_t, bus_addr_t, uint32_t *); static int a37x0_tbg_attach(device_t); static int a37x0_tbg_detach(device_t); static int a37x0_tbg_probe(device_t); static device_method_t a37x0_tbg_methods [] = { DEVMETHOD(device_attach, a37x0_tbg_attach), DEVMETHOD(device_detach, a37x0_tbg_detach), 
DEVMETHOD(device_probe, a37x0_tbg_probe), DEVMETHOD(clkdev_read_4, a37x0_tbg_read_4), DEVMETHOD_END }; static driver_t a37x0_tbg_driver = { "a37x0_tbg", a37x0_tbg_methods, sizeof(struct a37x0_tbg_softc) }; EARLY_DRIVER_MODULE(a37x0_tbg, simplebus, a37x0_tbg_driver, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); static int a37x0_tbg_read_4(device_t dev, bus_addr_t offset, uint32_t *val) { struct a37x0_tbg_softc *sc; sc = device_get_softc(dev); *val = bus_read_4(sc->res, offset); return (0); } static int a37x0_tbg_attach(device_t dev) { struct a37x0_tbg_pll_clk_def def; struct a37x0_tbg_softc *sc; const char *clkname; int error, i; phandle_t node; clk_t clock; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->dev = dev; if (bus_alloc_resources(dev, a37x0_tbg_clk_spec, &sc->res) != 0) { device_printf(dev, "Cannot allocate resources\n"); return (ENXIO); } sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) { device_printf(dev, "Cannot create clock domain.\n"); return (ENXIO); } error = clk_get_by_ofw_index(dev, node, 0, &clock); if (error != 0) { device_printf(dev, "Cannot find clock parent\n"); bus_release_resources(dev, a37x0_tbg_clk_spec, &sc->res); return (error); } clkname = clk_get_name(clock); for (i = 0; i < NUM_TBG; i++) { def.clkdef.parent_names = &clkname; def.clkdef.parent_cnt = 1; def.clkdef.id = i; def.clkdef.name = tbg[i].name; def.vcodiv.offset = tbg[i].vcodiv_offset; def.vcodiv.shift = tbg[i].vcodiv_shift; def.refdiv.offset = TBG_CTRL7; def.refdiv.shift = tbg[i].refdiv_shift; def.fbdiv.offset = TBG_CTRL0; def.fbdiv.shift = tbg[i].fbdiv_shift; def.vcodiv.mask = def.refdiv.mask = def.fbdiv.mask = TBG_MASK; def.tbg_bypass.offset = TBG_CTRL1; def.tbg_bypass.shift = tbg[i].tbg_bypass_en; def.tbg_bypass.mask = 0x1; error = a37x0_tbg_pll_clk_register(sc->clkdom, &def); if (error) { device_printf(dev, "Cannot register clock node\n"); bus_release_resources(dev, a37x0_tbg_clk_spec, &sc->res); return (ENXIO); } } error = clkdom_finit(sc->clkdom); 
if (error) { device_printf(dev, "Cannot finalize clock domain intialization\n"); bus_release_resources(dev, a37x0_tbg_clk_spec, &sc->res); return (ENXIO); } if (bootverbose) clkdom_dump(sc->clkdom); return (0); } static int a37x0_tbg_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "marvell,armada-3700-tbg-clock")) return (ENXIO); device_set_desc(dev, "Marvell Armada 3700 time base generators"); return (BUS_PROBE_DEFAULT); } static int a37x0_tbg_detach(device_t dev) { return (EBUSY); } diff --git a/sys/arm/mv/clk/a37x0_tbg_pll.c b/sys/arm/mv/clk/a37x0_tbg_pll.c index 83115dc5741d..da8ff83cc898 100644 --- a/sys/arm/mv/clk/a37x0_tbg_pll.c +++ b/sys/arm/mv/clk/a37x0_tbg_pll.c @@ -1,127 +1,127 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -#include +#include #include #include #include "clkdev_if.h" #include "a37x0_tbg_pll.h" #define RD4(_clk, offset, val) \ CLKDEV_READ_4(clknode_get_device(_clk), offset, val) struct a37x0_tbg_pll_softc { struct a37x0_tbg_pll_reg_def vcodiv; struct a37x0_tbg_pll_reg_def refdiv; struct a37x0_tbg_pll_reg_def fbdiv; struct a37x0_tbg_pll_reg_def tbg_bypass; }; static int a37x0_tbg_pll_recalc_freq(struct clknode *clk, uint64_t *freq) { struct a37x0_tbg_pll_softc *sc; uint32_t vcodiv, fbdiv, refdiv; unsigned int val; sc = clknode_get_softc(clk); RD4(clk, sc->tbg_bypass.offset, &val); if ((val >> sc->tbg_bypass.shift) & sc->tbg_bypass.mask) return 0; RD4(clk, sc->vcodiv.offset, &val); vcodiv = 1 << ((val >> sc->vcodiv.shift) & sc->vcodiv.mask); RD4(clk, sc->refdiv.offset, &val); refdiv = (val >> sc->refdiv.shift) & sc->refdiv.mask; RD4(clk, sc->fbdiv.offset, &val); fbdiv = (val >> sc->fbdiv.shift) & sc->fbdiv.mask; if (refdiv == 0) refdiv = 1; *freq = *freq * (fbdiv / refdiv) * 4; *freq /= vcodiv; return (0); } static int a37x0_tbg_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static clknode_method_t a37x0_tbg_pll_clknode_methods[] = { CLKNODEMETHOD(clknode_recalc_freq, a37x0_tbg_pll_recalc_freq), CLKNODEMETHOD(clknode_init, a37x0_tbg_pll_init), CLKNODEMETHOD_END }; DEFINE_CLASS_1(a37x0_tbg_pll__clknode, a37x0_tbg_pll_clknode_class, a37x0_tbg_pll_clknode_methods, sizeof(struct 
a37x0_tbg_pll_softc), clknode_class); int a37x0_tbg_pll_clk_register(struct clkdom *clkdom, const struct a37x0_tbg_pll_clk_def *clkdef) { struct a37x0_tbg_pll_softc *sc; struct clknode *clk; clk = clknode_create(clkdom, &a37x0_tbg_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->vcodiv = clkdef->vcodiv; sc->refdiv = clkdef->refdiv; sc->fbdiv = clkdef->fbdiv; sc->tbg_bypass = clkdef->tbg_bypass; if (clknode_register(clkdom, clk) == NULL) return (1); return (0); } diff --git a/sys/arm/mv/clk/a37x0_tbg_pll.h b/sys/arm/mv/clk/a37x0_tbg_pll.h index 2bb4fa6ab478..6c79c44c64db 100644 --- a/sys/arm/mv/clk/a37x0_tbg_pll.h +++ b/sys/arm/mv/clk/a37x0_tbg_pll.h @@ -1,52 +1,52 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _TBG_CLK_PLL_H_ #define _TBG_CLK_PLL_H_ -#include +#include struct a37x0_tbg_pll_reg_def { uint32_t offset; uint32_t shift; uint32_t mask; }; struct a37x0_tbg_pll_clk_def { struct clknode_init_def clkdef; struct a37x0_tbg_pll_reg_def vcodiv; struct a37x0_tbg_pll_reg_def refdiv; struct a37x0_tbg_pll_reg_def fbdiv; struct a37x0_tbg_pll_reg_def tbg_bypass; }; int a37x0_tbg_pll_clk_register(struct clkdom *, const struct a37x0_tbg_pll_clk_def *); #endif diff --git a/sys/arm/mv/clk/a37x0_xtal.c b/sys/arm/mv/clk/a37x0_xtal.c index e4ce8a90a24e..67a791b8d34a 100644 --- a/sys/arm/mv/clk/a37x0_xtal.c +++ b/sys/arm/mv/clk/a37x0_xtal.c @@ -1,144 +1,144 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include #include "syscon_if.h" #define BIT(x) (1 << (x)) #define NB_GPIO1_PIN_LT_L 0x8 #define NB_GPIO1_MPP1_9 BIT(9) struct a37x0_xtal_softc { device_t dev; struct clkdom *clkdom; }; static int a37x0_xtal_attach(device_t dev); static int a37x0_xtal_detach(device_t dev); static int a37x0_xtal_probe(device_t dev); static device_method_t a37x0_xtal_methods [] = { DEVMETHOD(device_probe, a37x0_xtal_probe), DEVMETHOD(device_attach, a37x0_xtal_attach), DEVMETHOD(device_detach, a37x0_xtal_detach), DEVMETHOD_END }; static driver_t a37x0_xtal_driver = { "a37x0-xtal", a37x0_xtal_methods, sizeof(struct a37x0_xtal_softc) }; EARLY_DRIVER_MODULE(a37x0_xtal, simplebus, a37x0_xtal_driver, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_EARLY); static int a37x0_xtal_attach(device_t dev) { struct a37x0_xtal_softc *sc; struct clk_fixed_def def; struct syscon *syscon; uint32_t reg; int error; sc = device_get_softc(dev); def.clkdef.name = "armada-3700-xtal"; def.clkdef.parent_names = NULL; def.clkdef.parent_cnt = 0; def.clkdef.id = 1; def.mult = 0; def.div = 0; if (SYSCON_GET_HANDLE(dev, &syscon) != 0 || syscon == NULL){ device_printf(dev, "Cannot get syscon driver handle\n"); return (ENXIO); } reg = SYSCON_READ_4(syscon, NB_GPIO1_PIN_LT_L); if (reg & NB_GPIO1_MPP1_9) def.freq = 40000000; else def.freq = 25000000; sc->clkdom = clkdom_create(dev); error = 
clknode_fixed_register(sc->clkdom, &def); if (error){ device_printf(dev, "Cannot register clock node\n"); return (ENXIO); } error = clkdom_finit(sc->clkdom); if (error){ device_printf(dev, "Cannot finalize clock domain initialization\n"); return (ENXIO); } if (bootverbose) clkdom_dump(sc->clkdom); return (0); } static int a37x0_xtal_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "marvell,armada-3700-xtal-clock")) return (ENXIO); device_set_desc(dev, "Marvell Armada 3700 Oscillator"); return (BUS_PROBE_DEFAULT); } static int a37x0_xtal_detach(device_t dev) { return (EBUSY); } diff --git a/sys/arm/mv/clk/armada38x_coreclk.c b/sys/arm/mv/clk/armada38x_coreclk.c index 21e630d18b06..f0f7767397b5 100644 --- a/sys/arm/mv/clk/armada38x_coreclk.c +++ b/sys/arm/mv/clk/armada38x_coreclk.c @@ -1,219 +1,219 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include "clkdev_if.h" #define ARMADA38X_CORECLK_MAXREG 0 static struct resource_spec armada38x_coreclk_specs[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct armada38x_coreclk_softc { struct resource *res; struct clkdom *clkdom; struct mtx mtx; }; static int armada38x_coreclk_attach(device_t dev); static int armada38x_coreclk_probe(device_t dev); static struct armada38x_gen_clknode_def gen_nodes[] = { { .def = { .name = "coreclk_0", .id = 0, .parent_cnt = 0, }, }, { .def = { .name = "coreclk_2", .id = 1, .parent_cnt = 0, }, } }; static int armada38x_coreclk_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct armada38x_coreclk_softc *sc; sc = device_get_softc(dev); if (addr > ARMADA38X_CORECLK_MAXREG) return (EINVAL); *val = bus_read_4(sc->res, addr); return (0); } static int armada38x_coreclk_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct armada38x_coreclk_softc *sc; sc = device_get_softc(dev); if (addr > ARMADA38X_CORECLK_MAXREG) return (EINVAL); bus_write_4(sc->res, addr, val); return (0); } static void armada38x_coreclk_device_lock(device_t dev) { struct armada38x_coreclk_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void armada38x_coreclk_device_unlock(device_t dev) { struct armada38x_coreclk_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } 
static int armada38x_coreclk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "marvell,armada-380-core-clock")) return (ENXIO); device_set_desc(dev, "ARMADA38X core-clock"); return (BUS_PROBE_DEFAULT); } static int armada38x_coreclk_create_coreclk(device_t dev) { struct armada38x_coreclk_softc *sc; int rv, i; sc = device_get_softc(dev); for (i = 0; i < nitems(gen_nodes); ++i) { rv = armada38x_gen_register(sc->clkdom, &gen_nodes[i]); if (rv) return (rv); } return (rv); } static int armada38x_coreclk_attach(device_t dev) { struct armada38x_coreclk_softc *sc; int error; sc = device_get_softc(dev); if (bus_alloc_resources(dev, armada38x_coreclk_specs, &sc->res) != 0) { device_printf(dev, "Cannot allocate resources.\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->clkdom = clkdom_create(dev); if (NULL == sc->clkdom) { device_printf(dev, "Cannot create clkdom\n"); return (ENXIO); } error = armada38x_coreclk_create_coreclk(dev); if (0 != error) { device_printf(dev, "Cannot create coreclk.\n"); return (error); } if (clkdom_finit(sc->clkdom) != 0) panic("Cannot finalize clock domain initialization.\n"); if (bootverbose) clkdom_dump(sc->clkdom); return (0); } static device_method_t amada38x_coreclk_methods[] = { DEVMETHOD(clkdev_write_4, armada38x_coreclk_write_4), DEVMETHOD(clkdev_read_4, armada38x_coreclk_read_4), DEVMETHOD(clkdev_device_lock, armada38x_coreclk_device_lock), DEVMETHOD(clkdev_device_unlock, armada38x_coreclk_device_unlock), DEVMETHOD(device_attach, armada38x_coreclk_attach), DEVMETHOD(device_probe, armada38x_coreclk_probe), DEVMETHOD_END }; static driver_t armada38x_coreclk_driver = { "armada38x_coreclk", amada38x_coreclk_methods, sizeof(struct armada38x_coreclk_softc), }; EARLY_DRIVER_MODULE(armada38x_coreclk, simplebus, armada38x_coreclk_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm/mv/clk/armada38x_gateclk.c 
b/sys/arm/mv/clk/armada38x_gateclk.c index c50a65b3396e..24355d9c9dac 100644 --- a/sys/arm/mv/clk/armada38x_gateclk.c +++ b/sys/arm/mv/clk/armada38x_gateclk.c @@ -1,291 +1,291 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include "clkdev_if.h" struct armada38x_gateclk_softc { struct clkdom *clkdom; struct mtx mtx; const char* parent; }; static struct clk_gate_def gateclk_nodes[] = { { .clkdef = { .name = "gateclk-audio", .id = 0, .parent_cnt = 1, .flags = 0, }, .shift = 0, }, { .clkdef = { .name = "gateclk-eth2", .id = 2, .parent_cnt = 1, .flags = 0, }, .shift = 2, }, { .clkdef = { .name = "gateclk-eth1", .id = 3, .parent_cnt = 1, .flags = 0, }, .shift = 3, }, { .clkdef = { .name = "gateclk-eth0", .id = 4, .parent_cnt = 1, .flags = 0, }, .shift = 4, }, { .clkdef = { .name = "gateclk-mdio", .id = 4, .parent_cnt = 1, .flags = 0, }, .shift = 4, }, { .clkdef = { .name = "gateclk-usb3h0", .id = 9, .parent_cnt = 1, .flags = 0, }, .shift = 9, }, { .clkdef = { .name = "gateclk-usb3h1", .id = 10, .parent_cnt = 1, .flags = 0, }, .shift = 10, }, { .clkdef = { .name = "gateclk-bm", .id = 13, .parent_cnt = 1, .flags = 0, }, .shift = 13, }, { .clkdef = { .name = "gateclk-crypto0z", .id = 14, .parent_cnt = 1, .flags = 0, }, .shift = 14, }, { .clkdef = { .name = "gateclk-sata0", .id = 15, .parent_cnt = 1, .flags = 0, }, .shift = 15, }, { .clkdef = { .name = "gateclk-crypto1z", .id = 16, .parent_cnt = 1, .flags = 0, }, .shift = 16, }, { .clkdef = { .name = "gateclk-sdio", .id = 17, .parent_cnt = 1, .flags = 0, }, .shift = 17, }, { .clkdef = { .name = "gateclk-usb2", .id = 18, .parent_cnt = 1, .flags = 0, }, .shift = 18, }, { .clkdef = { .name = "gateclk-crypto1", .id = 21, .parent_cnt = 1, .flags = 0, }, .shift = 21, }, { .clkdef = { .name = "gateclk-xor0", .id = 22, .parent_cnt = 1, .flags = 0, }, .shift = 22, }, { .clkdef = { .name = "gateclk-crypto0", .id = 23, .parent_cnt = 1, .flags = 0, }, .shift = 23, }, { .clkdef = { .name = "gateclk-xor1", .id = 28, .parent_cnt = 1, .flags = 0, }, .shift = 28, }, { .clkdef = { .name = "gateclk-sata1", .id = 
30, .parent_cnt = 1, .flags = 0, }, .shift = 30, } }; static int armada38x_gateclk_probe(device_t dev); static int armada38x_gateclk_attach(device_t dev); static device_method_t armada38x_gateclk_methods[] = { DEVMETHOD(device_probe, armada38x_gateclk_probe), DEVMETHOD(device_attach, armada38x_gateclk_attach), DEVMETHOD_END }; static driver_t armada38x_gateclk_driver = { "armada38x_gateclk", armada38x_gateclk_methods, sizeof(struct armada38x_gateclk_softc), }; EARLY_DRIVER_MODULE(armada38x_gateclk, simplebus, armada38x_gateclk_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE + 1); static int armada38x_gateclk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if(!ofw_bus_is_compatible(dev, "marvell,armada-380-gating-clock")) return (ENXIO); device_set_desc(dev, "ARMADA38X gateclk"); return (BUS_PROBE_DEFAULT); } static int armada38x_gateclk_attach(device_t dev) { struct armada38x_gateclk_softc *sc; phandle_t node; int i, error; clk_t clock; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) { device_printf(dev, "Cannot create clock domain.\n"); return (ENXIO); } error = clk_get_by_ofw_index(dev, node, 0, &clock); if (error > 0) return (error); sc->parent = clk_get_name(clock); for (i = 0; i < nitems(gateclk_nodes); ++i) { gateclk_nodes[i].clkdef.parent_names = &sc->parent; error = clknode_gate_register(sc->clkdom, &gateclk_nodes[i]); if (error != 0) { device_printf(dev, "Cannot create gate nodes\n"); return (error); } } if (clkdom_finit(sc->clkdom) != 0) panic("Cannot finalize clock domain initialization.\n"); return (0); } diff --git a/sys/arm/mv/clk/armada38x_gen.c b/sys/arm/mv/clk/armada38x_gen.c index 13951a33da55..b225c927f0d2 100644 --- a/sys/arm/mv/clk/armada38x_gen.c +++ b/sys/arm/mv/clk/armada38x_gen.c @@ -1,97 +1,97 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #define SAR_A38X_TCLK_FREQ_SHIFT 15 #define SAR_A38X_TCLK_FREQ_MASK 0x00008000 #define TCLK_250MHZ 250 * 1000 * 1000 #define TCLK_200MHZ 200 * 1000 * 1000 #define WR4(_clk, offset, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), offset, val) #define RD4(_clk, offset, val) \ CLKDEV_READ_4(clknode_get_device(_clk), offset, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int armada38x_gen_recalc(struct clknode *clk, uint64_t *freq) { uint32_t reg; DEVICE_LOCK(clk); RD4(clk, 0, ®); DEVICE_UNLOCK(clk); reg = (reg & SAR_A38X_TCLK_FREQ_MASK) >> SAR_A38X_TCLK_FREQ_SHIFT; *freq = reg ? TCLK_200MHZ : TCLK_250MHZ; return (0); } static int armada38x_gen_init(struct clknode *clk, device_t dev) { return (0); } static clknode_method_t armada38x_gen_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, armada38x_gen_init), CLKNODEMETHOD(clknode_recalc_freq, armada38x_gen_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(armada38x_gen_clknode, armada38x_gen_clknode_class, armada38x_gen_clknode_methods, 0, clknode_class); int armada38x_gen_register(struct clkdom *clkdom, const struct armada38x_gen_clknode_def *clkdef) { struct clknode *clk; clk = clknode_create(clkdom, &armada38x_gen_clknode_class, &clkdef->def); if (clk == NULL) return (1); clknode_register(clkdom, clk); return(0); } diff --git a/sys/arm/mv/clk/armada38x_gen.h b/sys/arm/mv/clk/armada38x_gen.h index ad46e38728fa..7275bd528529 100644 --- a/sys/arm/mv/clk/armada38x_gen.h +++ b/sys/arm/mv/clk/armada38x_gen.h @@ -1,40 +1,40 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _ARMADA38X_GEN_H_ #define _ARMADA38X_GEN_H_ -#include +#include struct armada38x_gen_clknode_def { struct clknode_init_def def; }; int armada38x_gen_register(struct clkdom *clkdom, const struct armada38x_gen_clknode_def *clkdef); #endif diff --git a/sys/arm/mv/clk/periph.c b/sys/arm/mv/clk/periph.c index 98445fa8bc6a..986016f77b8f 100644 --- a/sys/arm/mv/clk/periph.c +++ b/sys/arm/mv/clk/periph.c @@ -1,109 +1,109 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" int a37x0_periph_create_mux(struct clkdom *clkdom, struct clk_mux_def *mux, int id) { int error; mux->clkdef.id = id; error = clknode_mux_register(clkdom, mux); if (error != 0) { printf("Failed to create %s: %d\n", mux->clkdef.name, error); return (error); } return (0); } int a37x0_periph_create_div(struct clkdom *clkdom, struct clk_div_def *div, int id) { int error; div->clkdef.id = id; error = clknode_div_register(clkdom, div); if (error != 0) { printf("Failed to register %s: %d\n", div->clkdef.name, error); return (error); } return (0); } int a37x0_periph_create_gate(struct clkdom *clkdom, struct clk_gate_def *gate, int id) { int error; gate->clkdef.id = id; error = clknode_gate_register(clkdom, gate); if (error != 0) { printf("Failed to create %s:%d\n", gate->clkdef.name, error); 
return (error); } return (0); } void a37x0_periph_set_props(struct clknode_init_def *clkdef, const char **parent_names, unsigned int parent_cnt) { clkdef->parent_names = parent_names; clkdef->parent_cnt = parent_cnt; } diff --git a/sys/arm/mv/clk/periph.h b/sys/arm/mv/clk/periph.h index 3eada61665a2..f23f641b8f1d 100644 --- a/sys/arm/mv/clk/periph.h +++ b/sys/arm/mv/clk/periph.h @@ -1,420 +1,420 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #ifndef _PERIPH_H_ #define _PERIPH_H_ -#include -#include -#include -#include +#include +#include +#include +#include #define TBG_SEL 0x0 #define DIV_SEL0 0x4 #define DIV_SEL1 0x8 #define DIV_SEL2 0xC #define CLK_SEL 0x10 #define CLK_DIS 0x14 #define DIV_MASK 0x7 #define MUX_POS 1 #define DIV1_POS 2 #define DIV2_POS 3 #define GATE_POS 4 #define FIXED1_POS 5 #define FIXED2_POS 6 #define CLK_MUX_POS 7 #define RD4(_clk, offset, val) \ CLKDEV_READ_4(clknode_get_device(_clk), offset, val) #define A37x0_INTERNAL_CLK_ID(_base, _pos) \ ((_base * 10) + (_pos)) #define CLK_FULL_DD(_name, _id, _gate_shift, _tbg_mux_shift, \ _clk_mux_shift, _div1_reg, _div2_reg, _div1_shift, _div2_shift, \ _tbg_mux_name, _div1_name, _div2_name, _clk_mux_name) \ { \ .type = CLK_FULL_DD, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.full_dd.tbg_mux.clkdef.name = _tbg_mux_name, \ .clk_def.full_dd.tbg_mux.offset = TBG_SEL, \ .clk_def.full_dd.tbg_mux.shift = _tbg_mux_shift, \ .clk_def.full_dd.tbg_mux.width = 0x2, \ .clk_def.full_dd.tbg_mux.mux_flags = 0x0, \ .clk_def.full_dd.div1.clkdef.name = _div1_name, \ .clk_def.full_dd.div1.offset = _div1_reg, \ .clk_def.full_dd.div1.i_shift = _div1_shift, \ .clk_def.full_dd.div1.i_width = 0x3, \ .clk_def.full_dd.div1.f_shift = 0x0, \ .clk_def.full_dd.div1.f_width = 0x0, \ .clk_def.full_dd.div1.div_flags = 0x0, \ .clk_def.full_dd.div1.div_table = NULL, \ .clk_def.full_dd.div2.clkdef.name = _div2_name, \ .clk_def.full_dd.div2.offset = _div2_reg, \ .clk_def.full_dd.div2.i_shift = _div2_shift, \ .clk_def.full_dd.div2.i_width = 0x3, \ .clk_def.full_dd.div2.f_shift = 0x0, \ .clk_def.full_dd.div2.f_width = 0x0, \ .clk_def.full_dd.div2.div_flags = 0x0, \ .clk_def.full_dd.div2.div_table = NULL, \ .clk_def.full_dd.clk_mux.clkdef.name = _clk_mux_name, \ .clk_def.full_dd.clk_mux.offset = CLK_SEL, \ .clk_def.full_dd.clk_mux.shift = _clk_mux_shift, \ .clk_def.full_dd.clk_mux.width = 0x1, \ .clk_def.full_dd.clk_mux.mux_flags = 0x0, \ 
.clk_def.full_dd.gate.clkdef.name = _name, \ .clk_def.full_dd.gate.offset = CLK_DIS, \ .clk_def.full_dd.gate.shift = _gate_shift, \ .clk_def.full_dd.gate.on_value = 0, \ .clk_def.full_dd.gate.off_value = 1, \ .clk_def.full_dd.gate.mask = 0x1, \ .clk_def.full_dd.gate.gate_flags = 0x0 \ } #define CLK_FULL(_name, _id, _gate_shift, _tbg_mux_shift, \ _clk_mux_shift, _div1_reg, _div1_shift, _div_table, _tbg_mux_name, \ _div1_name, _clk_mux_name) \ { \ .type = CLK_FULL, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.full_d.tbg_mux.clkdef.name = _tbg_mux_name, \ .clk_def.full_d.tbg_mux.offset = TBG_SEL, \ .clk_def.full_d.tbg_mux.shift = _tbg_mux_shift, \ .clk_def.full_d.tbg_mux.width = 0x2, \ .clk_def.full_d.tbg_mux.mux_flags = 0x0, \ .clk_def.full_d.div.clkdef.name = _div1_name, \ .clk_def.full_d.div.offset = _div1_reg, \ .clk_def.full_d.div.i_shift = _div1_shift, \ .clk_def.full_d.div.i_width = 0x3, \ .clk_def.full_d.div.f_shift = 0x0, \ .clk_def.full_d.div.f_width = 0x0, \ .clk_def.full_d.div.div_flags = 0x0, \ .clk_def.full_d.div.div_table = _div_table, \ .clk_def.full_d.clk_mux.clkdef.name = _clk_mux_name, \ .clk_def.full_d.clk_mux.offset = CLK_SEL, \ .clk_def.full_d.clk_mux.shift = _clk_mux_shift, \ .clk_def.full_d.clk_mux.width = 0x1, \ .clk_def.full_d.clk_mux.mux_flags = 0x0, \ .clk_def.full_d.gate.clkdef.name = _name, \ .clk_def.full_d.gate.offset = CLK_DIS, \ .clk_def.full_d.gate.shift = _gate_shift, \ .clk_def.full_d.gate.on_value = 0, \ .clk_def.full_d.gate.off_value = 1, \ .clk_def.full_d.gate.mask = 0x1, \ .clk_def.full_d.gate.gate_flags = 0x0 \ } #define CLK_CPU(_name, _id, _tbg_mux_shift, _clk_mux_shift, _div1_reg, \ _div1_shift, _div_table, _tbg_mux_name, _div1_name) \ { \ .type = CLK_CPU, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.cpu.tbg_mux.clkdef.name = _tbg_mux_name, \ .clk_def.cpu.tbg_mux.offset = TBG_SEL, \ .clk_def.cpu.tbg_mux.shift = _tbg_mux_shift, \ .clk_def.cpu.tbg_mux.width = 0x2, 
\ .clk_def.cpu.tbg_mux.mux_flags = 0x0, \ .clk_def.cpu.div.clkdef.name = _div1_name, \ .clk_def.cpu.div.offset = _div1_reg, \ .clk_def.cpu.div.i_shift = _div1_shift, \ .clk_def.cpu.div.i_width = 0x3, \ .clk_def.cpu.div.f_shift = 0x0, \ .clk_def.cpu.div.f_width = 0x0, \ .clk_def.cpu.div.div_flags = 0x0, \ .clk_def.cpu.div.div_table = _div_table, \ .clk_def.cpu.clk_mux.clkdef.name = _name, \ .clk_def.cpu.clk_mux.offset = CLK_SEL, \ .clk_def.cpu.clk_mux.shift = _clk_mux_shift, \ .clk_def.cpu.clk_mux.width = 0x1, \ .clk_def.cpu.clk_mux.mux_flags = 0x0, \ } #define CLK_GATE(_name, _id, _gate_shift, _pname) \ { \ .type = CLK_GATE, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .common_def.pname = _pname, \ .clk_def.gate.gate.clkdef.name = _name, \ .clk_def.gate.gate.clkdef.parent_cnt = 1, \ .clk_def.gate.gate.offset = CLK_DIS, \ .clk_def.gate.gate.shift = _gate_shift, \ .clk_def.gate.gate.on_value = 0, \ .clk_def.gate.gate.off_value = 1, \ .clk_def.gate.gate.mask = 0x1, \ .clk_def.gate.gate.gate_flags = 0x0 \ } #define CLK_MDD(_name, _id, _tbg_mux_shift, _clk_mux_shift, _div1_reg, \ _div2_reg, _div1_shift, _div2_shift, _tbg_mux_name, _div1_name, \ _div2_name) \ { \ .type = CLK_MDD, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.mdd.tbg_mux.clkdef.name = _tbg_mux_name, \ .clk_def.mdd.tbg_mux.offset = TBG_SEL, \ .clk_def.mdd.tbg_mux.shift = _tbg_mux_shift, \ .clk_def.mdd.tbg_mux.width = 0x2, \ .clk_def.mdd.tbg_mux.mux_flags = 0x0, \ .clk_def.mdd.div1.clkdef.name = _div1_name, \ .clk_def.mdd.div1.offset = _div1_reg, \ .clk_def.mdd.div1.i_shift = _div1_shift, \ .clk_def.mdd.div1.i_width = 0x3, \ .clk_def.mdd.div1.f_shift = 0x0, \ .clk_def.mdd.div1.f_width = 0x0, \ .clk_def.mdd.div1.div_flags = 0x0, \ .clk_def.mdd.div1.div_table = NULL, \ .clk_def.mdd.div2.clkdef.name = _div2_name, \ .clk_def.mdd.div2.offset = _div2_reg, \ .clk_def.mdd.div2.i_shift = _div2_shift, \ .clk_def.mdd.div2.i_width = 0x3, \ .clk_def.mdd.div2.f_shift 
= 0x0, \ .clk_def.mdd.div2.f_width = 0x0, \ .clk_def.mdd.div2.div_flags = 0x0, \ .clk_def.mdd.div2.div_table = NULL, \ .clk_def.mdd.clk_mux.clkdef.name = _name, \ .clk_def.mdd.clk_mux.offset = CLK_SEL, \ .clk_def.mdd.clk_mux.shift = _clk_mux_shift, \ .clk_def.mdd.clk_mux.width = 0x1, \ .clk_def.mdd.clk_mux.mux_flags = 0x0 \ } #define CLK_MUX_GATE(_name, _id, _gate_shift, _mux_shift, _pname, \ _mux_name, _fixed_name) \ { \ .type = CLK_MUX_GATE, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .common_def.pname = _pname, \ .clk_def.mux_gate.mux.clkdef.name = _mux_name, \ .clk_def.mux_gate.mux.offset = TBG_SEL, \ .clk_def.mux_gate.mux.shift = _mux_shift, \ .clk_def.mux_gate.mux.width = 0x1, \ .clk_def.mux_gate.mux.mux_flags = 0x0, \ .clk_def.mux_gate.gate.clkdef.name = _name, \ .clk_def.mux_gate.gate.offset = CLK_DIS, \ .clk_def.mux_gate.gate.shift = _gate_shift, \ .clk_def.mux_gate.gate.on_value = 0, \ .clk_def.mux_gate.gate.off_value = 1, \ .clk_def.mux_gate.gate.mask = 0x1, \ .clk_def.mux_gate.gate.gate_flags = 0x0, \ .clk_def.mux_gate.fixed.clkdef.name = _fixed_name \ } #define CLK_MUX_GATE_FIXED(_name, _id, _gate_shift, _mux_shift, \ _mux_name, _gate_name, _fixed1_name) \ { \ .type = CLK_MUX_GATE_FIXED, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.mux_gate_fixed.mux.clkdef.name = _mux_name, \ .clk_def.mux_gate_fixed.mux.offset = TBG_SEL, \ .clk_def.mux_gate_fixed.mux.shift = _mux_shift, \ .clk_def.mux_gate_fixed.mux.width = 0x1, \ .clk_def.mux_gate_fixed.mux.mux_flags = 0x0, \ .clk_def.mux_gate_fixed.gate.clkdef.name = _gate_name, \ .clk_def.mux_gate_fixed.gate.offset = CLK_DIS, \ .clk_def.mux_gate_fixed.gate.shift = _gate_shift, \ .clk_def.mux_gate_fixed.gate.on_value = 0, \ .clk_def.mux_gate_fixed.gate.off_value = 1, \ .clk_def.mux_gate_fixed.gate.mask = 0x1, \ .clk_def.mux_gate_fixed.gate.gate_flags = 0x0, \ .clk_def.mux_gate_fixed.fixed1.clkdef.name = _fixed1_name, \ .clk_def.mux_gate_fixed.fixed2.clkdef.name 
= _name \ } #define CLK_FIXED(_name, _id, _gate_shift, _mux_shift, _mux_name, \ _fixed_name) \ { \ .type = CLK_FIXED, \ .common_def.device_name = _name, \ .common_def.device_id = _id, \ .clk_def.fixed.mux.clkdef.name = _mux_name, \ .clk_def.fixed.mux.offset = TBG_SEL, \ .clk_def.fixed.mux.shift = _mux_shift, \ .clk_def.fixed.mux.width = 0x1, \ .clk_def.fixed.mux.mux_flags = 0x0, \ .clk_def.fixed.gate.clkdef.name = _name, \ .clk_def.fixed.gate.offset = CLK_DIS, \ .clk_def.fixed.gate.shift = _gate_shift, \ .clk_def.fixed.gate.on_value = 0, \ .clk_def.fixed.gate.off_value = 1, \ .clk_def.fixed.gate.mask = 0x1, \ .clk_def.fixed.gate.gate_flags = 0x0, \ .clk_def.fixed.fixed.clkdef.name = _fixed_name \ } struct a37x0_periph_clk_softc { device_t dev; struct resource *res; struct clkdom *clkdom; struct mtx mtx; struct a37x0_periph_clknode_def *devices; int device_count; }; struct a37x0_periph_clk_dd_def { struct clk_mux_def tbg_mux; struct clk_div_def div1; struct clk_div_def div2; struct clk_mux_def clk_mux; struct clk_gate_def gate; }; struct a37x0_periph_clk_cpu_def { struct clk_mux_def tbg_mux; struct clk_div_def div; struct clk_mux_def clk_mux; }; struct a37x0_periph_clk_d_def { struct clk_mux_def tbg_mux; struct clk_div_def div; struct clk_mux_def clk_mux; struct clk_gate_def gate; }; struct a37x0_periph_clk_fixed_def { struct clk_mux_def mux; struct clk_fixed_def fixed; struct clk_gate_def gate; }; struct a37x0_periph_clk_gate_def { struct clk_gate_def gate; }; struct a37x0_periph_clk_mux_dd_def { struct clk_mux_def tbg_mux; struct clk_div_def div1; struct clk_div_def div2; struct clk_mux_def clk_mux; }; struct a37x0_periph_clk_mux_div_def { struct clk_mux_def mux; struct clk_div_def div; }; struct a37x0_periph_clk_mux_gate_def { struct clk_mux_def mux; struct clk_fixed_def fixed; struct clk_gate_def gate; }; struct a37x0_periph_clk_mux_gate_fixed_def { struct clk_fixed_def fixed1; struct clk_mux_def mux; struct clk_gate_def gate; struct clk_fixed_def fixed2; }; 
enum a37x0_periph_clk_type { /* Double divider clock */ CLK_FULL_DD, /* Single divider clock */ CLK_FULL, /* Gate clock */ CLK_GATE, /* Mux, gate clock */ CLK_MUX_GATE, /* CPU clock */ CLK_CPU, /* Clock with fixed frequency divider */ CLK_FIXED, /* Clock with double divider, without gate */ CLK_MDD, /* Clock with two fixed frequency dividers */ CLK_MUX_GATE_FIXED }; struct a37x0_periph_common_defs { char *device_name; int device_id; int tbg_cnt; const char *pname; const char **tbgs; const char *xtal; }; union a37x0_periph_clocks_defs { struct a37x0_periph_clk_dd_def full_dd; struct a37x0_periph_clk_d_def full_d; struct a37x0_periph_clk_gate_def gate; struct a37x0_periph_clk_mux_gate_def mux_gate; struct a37x0_periph_clk_cpu_def cpu; struct a37x0_periph_clk_fixed_def fixed; struct a37x0_periph_clk_mux_dd_def mdd; struct a37x0_periph_clk_mux_gate_fixed_def mux_gate_fixed; }; struct a37x0_periph_clknode_def { enum a37x0_periph_clk_type type; struct a37x0_periph_common_defs common_def; union a37x0_periph_clocks_defs clk_def; }; int a37x0_periph_create_mux(struct clkdom *, struct clk_mux_def *, int); int a37x0_periph_create_div(struct clkdom *, struct clk_div_def *, int); int a37x0_periph_create_gate(struct clkdom *, struct clk_gate_def *, int); void a37x0_periph_set_props(struct clknode_init_def *, const char **, unsigned int); int a37x0_periph_d_register_full_clk_dd(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_d_register_full_clk(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_d_register_periph_cpu(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_fixed_register_fixed(struct clkdom*, struct a37x0_periph_clknode_def *); int a37x0_periph_gate_register_gate(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_d_register_mdd(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_d_register_mux_div_clk(struct clkdom *, struct a37x0_periph_clknode_def *); int 
a37x0_periph_register_mux_gate(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_register_mux_gate_fixed(struct clkdom *, struct a37x0_periph_clknode_def *); int a37x0_periph_clk_read_4(device_t, bus_addr_t, uint32_t *); void a37x0_periph_clk_device_unlock(device_t); void a37x0_periph_clk_device_lock(device_t); int a37x0_periph_clk_attach(device_t); int a37x0_periph_clk_detach(device_t); #endif diff --git a/sys/arm/mv/clk/periph_clk_d.c b/sys/arm/mv/clk/periph_clk_d.c index 0e8659d56e18..f3dc3a30a8b6 100644 --- a/sys/arm/mv/clk/periph_clk_d.c +++ b/sys/arm/mv/clk/periph_clk_d.c @@ -1,271 +1,271 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define PARENT_CNT 2 /* * Register chain: mux (select proper TBG) -> div1 (first frequency divider) -> * div2 (second frequency divider) -> mux (select divided freq. * or xtal output) -> gate (enable or disable clock), which is also final node */ int a37x0_periph_d_register_full_clk_dd(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_mux_def *clk_mux; struct clk_mux_def *tbg_mux; struct clk_gate_def *gate; struct clk_div_def *div1; struct clk_div_def *div2; int error, dev_id; dev_id = device_def->common_def.device_id; tbg_mux = &device_def->clk_def.full_dd.tbg_mux; div1 = &device_def->clk_def.full_dd.div1; div2 = &device_def->clk_def.full_dd.div2; gate = &device_def->clk_def.full_dd.gate; clk_mux = &device_def->clk_def.full_dd.clk_mux; a37x0_periph_set_props(&tbg_mux->clkdef, device_def->common_def.tbgs, device_def->common_def.tbg_cnt); error = a37x0_periph_create_mux(clkdom, tbg_mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&div1->clkdef, &tbg_mux->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div1, A37x0_INTERNAL_CLK_ID(dev_id, DIV1_POS)); if (error) goto fail; a37x0_periph_set_props(&div2->clkdef, &div1->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div2, A37x0_INTERNAL_CLK_ID(dev_id, DIV2_POS)); if (error) goto fail; parent_names[0] = device_def->common_def.xtal; parent_names[1] = div2->clkdef.name; a37x0_periph_set_props(&clk_mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, clk_mux, A37x0_INTERNAL_CLK_ID(dev_id, CLK_MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&gate->clkdef, &clk_mux->clkdef.name, 1); error = a37x0_periph_create_gate(clkdom, gate, 
dev_id); if (error) goto fail; fail: return (error); } /* * Register chain: mux (select proper TBG) -> div1 (first frequency divider) -> * mux (select divided freq. or xtal output) -> gate (enable or disable clock), * which is also final node */ int a37x0_periph_d_register_full_clk(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_mux_def *tbg_mux; struct clk_mux_def *clk_mux; struct clk_gate_def *gate; struct clk_div_def *div; int error, dev_id; dev_id = device_def->common_def.device_id; tbg_mux = &device_def->clk_def.full_d.tbg_mux; div = &device_def->clk_def.full_d.div; gate = &device_def->clk_def.full_d.gate; clk_mux = &device_def->clk_def.full_d. clk_mux; a37x0_periph_set_props(&tbg_mux->clkdef, device_def->common_def.tbgs, device_def->common_def.tbg_cnt); error = a37x0_periph_create_mux(clkdom, tbg_mux, A37x0_INTERNAL_CLK_ID(device_def->common_def.device_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&div->clkdef, &tbg_mux->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div, A37x0_INTERNAL_CLK_ID(device_def->common_def.device_id, DIV1_POS)); if (error) goto fail; parent_names[0] = device_def->common_def.xtal; parent_names[1] = div->clkdef.name; a37x0_periph_set_props(&clk_mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, clk_mux, A37x0_INTERNAL_CLK_ID(dev_id, CLK_MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&gate->clkdef, &clk_mux->clkdef.name, 1); error = a37x0_periph_create_gate(clkdom, gate, dev_id); if (error) goto fail; fail: return (error); } /* * Register CPU clock. It consists of mux (select proper TBG) -> div (frequency * divider) -> mux (choose divided or xtal output). 
*/ int a37x0_periph_d_register_periph_cpu(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_mux_def *clk_mux; struct clk_mux_def *tbg_mux; struct clk_div_def *div; int error, dev_id; dev_id = device_def->common_def.device_id; tbg_mux = &device_def->clk_def.cpu.tbg_mux; div = &device_def->clk_def.cpu.div; clk_mux = &device_def->clk_def.cpu.clk_mux; a37x0_periph_set_props(&tbg_mux->clkdef, device_def->common_def.tbgs, device_def->common_def.tbg_cnt); error = a37x0_periph_create_mux(clkdom, tbg_mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&div->clkdef, &tbg_mux->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div, A37x0_INTERNAL_CLK_ID(dev_id, DIV1_POS)); if (error) goto fail; parent_names[0] = device_def->common_def.xtal; parent_names[1] = div->clkdef.name; a37x0_periph_set_props(&clk_mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, clk_mux, dev_id); fail: return (error); } /* * Register chain: mux (choose proper TBG) -> div1 (first frequency divider) -> * div2 (second frequency divider) -> mux (choose divided or xtal output). 
*/ int a37x0_periph_d_register_mdd(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_mux_def *tbg_mux; struct clk_mux_def *clk_mux; struct clk_div_def *div1; struct clk_div_def *div2; int error, dev_id; dev_id = device_def->common_def.device_id; tbg_mux = &device_def->clk_def.mdd.tbg_mux; div1 = &device_def->clk_def.mdd.div1; div2 = &device_def->clk_def.mdd.div2; clk_mux = &device_def->clk_def.mdd.clk_mux; a37x0_periph_set_props(&tbg_mux->clkdef, device_def->common_def.tbgs, device_def->common_def.tbg_cnt); error = a37x0_periph_create_mux(clkdom, tbg_mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&div1->clkdef, &tbg_mux->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div1, A37x0_INTERNAL_CLK_ID(dev_id, DIV1_POS)); if (error) goto fail; a37x0_periph_set_props(&div2->clkdef, &div1->clkdef.name, 1); error = a37x0_periph_create_div(clkdom, div2, A37x0_INTERNAL_CLK_ID(dev_id, DIV2_POS)); if (error) goto fail; parent_names[0] = device_def->common_def.xtal; parent_names[1] = div2->clkdef.name; a37x0_periph_set_props(&clk_mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, clk_mux, dev_id); if (error) goto fail; fail: return (error); } diff --git a/sys/arm/mv/clk/periph_clk_fixed.c b/sys/arm/mv/clk/periph_clk_fixed.c index d3c1f0deafa8..e0e6f446716c 100644 --- a/sys/arm/mv/clk/periph_clk_fixed.c +++ b/sys/arm/mv/clk/periph_clk_fixed.c @@ -1,102 +1,102 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define PARENT_CNT 2 /* * Register clock with fixed frequency divider clock. 
Chain consists of: * fixed clock (output from xtal/2) -> mux (choose fixed or xtal frequency) */ int a37x0_periph_fixed_register_fixed(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_fixed_def fixed_def; struct clk_gate_def *gate; struct clk_mux_def *mux; int error, dev_id; dev_id = device_def->common_def.device_id; mux = &device_def->clk_def.fixed.mux; gate = &device_def->clk_def.fixed.gate; fixed_def = device_def->clk_def.fixed.fixed; fixed_def.clkdef.parent_names = &device_def->common_def.xtal; fixed_def.clkdef.parent_cnt = 1; fixed_def.clkdef.id = A37x0_INTERNAL_CLK_ID(dev_id, FIXED1_POS); fixed_def.clkdef.flags = 0; fixed_def.mult = 1; fixed_def.div = 2; fixed_def.freq = 0; parent_names[0] = device_def->common_def.xtal; parent_names[1] = fixed_def.clkdef.name; error = clknode_fixed_register(clkdom, &fixed_def); if (error) goto fail; a37x0_periph_set_props(&mux->clkdef, parent_names ,PARENT_CNT); error = a37x0_periph_create_mux(clkdom, mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&gate->clkdef, &mux->clkdef.name, 1); error = a37x0_periph_create_gate(clkdom, gate, dev_id); if (error) goto fail; fail: return (error); } diff --git a/sys/arm/mv/clk/periph_clk_gate.c b/sys/arm/mv/clk/periph_clk_gate.c index 803b5f8ed7e1..538077eb444f 100644 --- a/sys/arm/mv/clk/periph_clk_gate.c +++ b/sys/arm/mv/clk/periph_clk_gate.c @@ -1,79 +1,79 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" /* * Regsiter gate clock (disable or enable clock). 
*/ int a37x0_periph_gate_register_gate(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { struct clk_gate_def *gate; const char *parent_name; int error, dev_id; dev_id = device_def->common_def.device_id; gate = &device_def->clk_def.gate.gate; if (device_def->common_def.pname == NULL) parent_name = device_def->common_def.xtal; else parent_name = device_def->common_def.pname; a37x0_periph_set_props(&gate->clkdef, &parent_name, 1); error = a37x0_periph_create_gate(clkdom, gate, dev_id); if (error) goto fail; fail: return (error); return (0); } diff --git a/sys/arm/mv/clk/periph_clk_mux_gate.c b/sys/arm/mv/clk/periph_clk_mux_gate.c index 6016a7b1c1a4..f96d034228b4 100644 --- a/sys/arm/mv/clk/periph_clk_mux_gate.c +++ b/sys/arm/mv/clk/periph_clk_mux_gate.c @@ -1,163 +1,163 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include "clkdev_if.h" #include "periph.h" #define PARENT_CNT 2 #define TBG_A_S_OFW_INDEX 0 /* * Register chain: fixed (freq/2) -> mux (choose fixed or parent frequency) -> * gate (enable or disable clock). */ int a37x0_periph_register_mux_gate(struct clkdom *clkdom, struct a37x0_periph_clknode_def *device_def) { const char *parent_names[PARENT_CNT]; struct clk_fixed_def fixed; struct clk_gate_def *gate; struct clk_mux_def *mux; int error, dev_id; dev_id = device_def->common_def.device_id; mux = &device_def->clk_def.mux_gate.mux; gate = &device_def->clk_def.mux_gate.gate; fixed = device_def->clk_def.fixed.fixed; fixed.clkdef.id = A37x0_INTERNAL_CLK_ID(dev_id, FIXED1_POS); fixed.clkdef.parent_names = &device_def->common_def.pname; fixed.clkdef.parent_cnt = 1; fixed.clkdef.flags = 0x0; fixed.mult = 1; fixed.div = 2; fixed.freq = 0; error = clknode_fixed_register(clkdom, &fixed); if (error) goto fail; parent_names[0] = device_def->common_def.pname; parent_names[1] = fixed.clkdef.name; a37x0_periph_set_props(&mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&gate->clkdef, &mux->clkdef.name, 1); error = a37x0_periph_create_gate(clkdom, gate, dev_id); 
if (error) goto fail; fail: return (error); } /* * Register chain: fixed1 (freq/2) -> mux (fixed1 or TBG-A-S frequency) -> * gate -> fixed2 (freq/2). */ int a37x0_periph_register_mux_gate_fixed(struct clkdom * clkdom, struct a37x0_periph_clknode_def *device_def) { struct clk_fixed_def *fixed1, *fixed2; const char *parent_names[PARENT_CNT]; struct clk_gate_def *gate; struct clk_mux_def *mux; int error, dev_id; dev_id = device_def->common_def.device_id; mux = &device_def->clk_def.mux_gate_fixed.mux; gate = &device_def->clk_def.mux_gate_fixed.gate; fixed1 = &device_def->clk_def.mux_gate_fixed.fixed1; fixed2 = &device_def->clk_def.mux_gate_fixed.fixed2; fixed1->clkdef.parent_names = &device_def->common_def.pname; fixed1->clkdef.id = A37x0_INTERNAL_CLK_ID(dev_id, FIXED1_POS); fixed1->clkdef.flags = 0x0; fixed1->mult = 1; fixed1->div = 2; fixed1->freq = 0; error = clknode_fixed_register(clkdom, fixed1); if (error) goto fail; parent_names[0] = device_def->common_def.tbgs[TBG_A_S_OFW_INDEX]; parent_names[1] = fixed1->clkdef.name; a37x0_periph_set_props(&mux->clkdef, parent_names, PARENT_CNT); error = a37x0_periph_create_mux(clkdom, mux, A37x0_INTERNAL_CLK_ID(dev_id, MUX_POS)); if (error) goto fail; a37x0_periph_set_props(&gate->clkdef, &mux->clkdef.name, 1); error = a37x0_periph_create_gate(clkdom, gate, A37x0_INTERNAL_CLK_ID(dev_id, GATE_POS)); if (error) goto fail; fixed2->clkdef.parent_names = &gate->clkdef.name; fixed2->clkdef.parent_cnt = 1; fixed2->clkdef.id = dev_id; error = clknode_fixed_register(clkdom, fixed2); if (error) goto fail; fail: return (error); } diff --git a/sys/arm/mv/mv_ap806_clock.c b/sys/arm/mv/mv_ap806_clock.c index b926a58aca0c..f41f5e09c4dd 100644 --- a/sys/arm/mv/mv_ap806_clock.c +++ b/sys/arm/mv/mv_ap806_clock.c @@ -1,226 +1,226 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided 
that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "syscon_if.h" static struct clk_fixed_def ap806_clk_cluster_0 = { .clkdef.id = 0, .clkdef.name = "ap806-cpu-cluster-0", .freq = 0, }; static struct clk_fixed_def ap806_clk_cluster_1 = { .clkdef.id = 1, .clkdef.name = "ap806-cpu-cluster-1", .freq = 0, }; static struct clk_fixed_def ap806_clk_fixed = { .clkdef.id = 2, .clkdef.name = "ap806-fixed", .freq = 1200000000, }; /* Thoses are the only exported clocks AFAICT */ static const char *mss_parents[] = {"ap806-fixed"}; static struct clk_fixed_def ap806_clk_mss = { .clkdef.id = 3, .clkdef.name = "ap806-mss", .clkdef.parent_names = mss_parents, .clkdef.parent_cnt = 1, .mult = 1, .div = 6, }; static const char *sdio_parents[] = {"ap806-fixed"}; static struct clk_fixed_def ap806_clk_sdio = { .clkdef.id = 4, .clkdef.name = "ap806-sdio", .clkdef.parent_names = sdio_parents, .clkdef.parent_cnt = 1, .mult = 1, .div = 3, }; struct mv_ap806_clock_softc { device_t dev; struct syscon *syscon; }; static struct ofw_compat_data compat_data[] = { {"marvell,ap806-clock", 1}, {NULL, 0} }; #define RD4(sc, reg) SYSCON_READ_4((sc)->syscon, (reg)) #define WR4(sc, reg, val) SYSCON_WRITE_4((sc)->syscon, (reg), (val)) static int mv_ap806_clock_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Marvell AP806 Clock Controller"); return (BUS_PROBE_DEFAULT); } static int mv_ap806_clock_attach(device_t dev) { struct mv_ap806_clock_softc *sc; struct clkdom *clkdom; uint64_t clock_freq; uint32_t reg; sc = device_get_softc(dev); sc->dev = dev; if (SYSCON_GET_HANDLE(sc->dev, &sc->syscon) != 0 || sc->syscon == NULL) { device_printf(dev, "cannot get syscon for device\n"); return (ENXIO); } reg = RD4(sc, 0x400); switch (reg & 0x1f) { case 0x0: case 0x1: clock_freq = 2000000000; 
break; case 0x4: clock_freq = 1600000000; break; case 0x6: clock_freq = 1800000000; break; case 0x7: clock_freq = 1800000000; break; case 0xb: clock_freq = 1600000000; break; case 0xd: clock_freq = 1600000000; break; case 0x13: clock_freq = 1000000000; break; case 0x14: clock_freq = 1333000000; break; case 0x17: clock_freq = 1333000000; break; case 0x19: clock_freq = 1200000000; break; case 0x1a: clock_freq = 1400000000; break; case 0x1b: clock_freq = 600000000; break; case 0x1c: clock_freq = 800000000; break; case 0x1d: clock_freq = 1000000000; break; default: device_printf(dev, "Cannot guess clock freq with reg %x\n", reg & 0x1f); return (ENXIO); break; }; ap806_clk_cluster_0.freq = clock_freq; ap806_clk_cluster_1.freq = clock_freq; clkdom = clkdom_create(dev); clknode_fixed_register(clkdom, &ap806_clk_cluster_0); clknode_fixed_register(clkdom, &ap806_clk_cluster_1); clknode_fixed_register(clkdom, &ap806_clk_fixed); clknode_fixed_register(clkdom, &ap806_clk_mss); clknode_fixed_register(clkdom, &ap806_clk_sdio); clkdom_finit(clkdom); if (bootverbose) clkdom_dump(clkdom); return (0); } static int mv_ap806_clock_detach(device_t dev) { return (EBUSY); } static device_method_t mv_ap806_clock_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_ap806_clock_probe), DEVMETHOD(device_attach, mv_ap806_clock_attach), DEVMETHOD(device_detach, mv_ap806_clock_detach), DEVMETHOD_END }; static driver_t mv_ap806_clock_driver = { "mv_ap806_clock", mv_ap806_clock_methods, sizeof(struct mv_ap806_clock_softc), }; EARLY_DRIVER_MODULE(mv_ap806_clock, simplebus, mv_ap806_clock_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_LATE); diff --git a/sys/arm/mv/mv_cp110_clock.c b/sys/arm/mv/mv_cp110_clock.c index 604195da1ab4..45f8476df5b3 100644 --- a/sys/arm/mv/mv_cp110_clock.c +++ b/sys/arm/mv/mv_cp110_clock.c @@ -1,363 +1,363 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and 
binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include #include #include "clkdev_if.h" #include "syscon_if.h" /* Clocks */ static struct clk_fixed_def cp110_clk_pll_0 = { .clkdef.id = CP110_PLL_0, .freq = 1000000000, }; static const char *clk_parents_0[] = {"cp110-pll0-0"}; static const char *clk_parents_1[] = {"cp110-pll0-1"}; static struct clk_fixed_def cp110_clk_ppv2_core = { .clkdef.id = CP110_PPV2_CORE, .clkdef.parent_cnt = 1, .mult = 1, .div = 3, }; static struct clk_fixed_def cp110_clk_x2core = { .clkdef.id = CP110_X2CORE, .clkdef.parent_cnt = 1, .mult = 1, .div = 2, }; static const char *core_parents_0[] = {"cp110-x2core-0"}; static const char *core_parents_1[] = {"cp110-x2core-1"}; static struct clk_fixed_def cp110_clk_core = { .clkdef.id = CP110_CORE, .clkdef.parent_cnt = 1, .mult = 1, .div = 2, }; static struct clk_fixed_def cp110_clk_sdio = { .clkdef.id = CP110_SDIO, .clkdef.parent_cnt = 1, .mult = 2, .div = 5, }; /* Gates */ static struct cp110_gate cp110_gates[] = { CCU_GATE(CP110_GATE_AUDIO, "cp110-gate-audio", 0) CCU_GATE(CP110_GATE_COMM_UNIT, "cp110-gate-comm_unit", 1) /* CCU_GATE(CP110_GATE_NAND, "cp110-gate-nand", 2) */ CCU_GATE(CP110_GATE_PPV2, "cp110-gate-ppv2", 3) CCU_GATE(CP110_GATE_SDIO, "cp110-gate-sdio", 4) CCU_GATE(CP110_GATE_MG, "cp110-gate-mg", 5) CCU_GATE(CP110_GATE_MG_CORE, "cp110-gate-mg_core", 6) CCU_GATE(CP110_GATE_XOR1, "cp110-gate-xor1", 7) CCU_GATE(CP110_GATE_XOR0, "cp110-gate-xor0", 8) CCU_GATE(CP110_GATE_GOP_DP, "cp110-gate-gop_dp", 9) CCU_GATE(CP110_GATE_PCIE_X1_0, "cp110-gate-pcie_x10", 11) CCU_GATE(CP110_GATE_PCIE_X1_1, "cp110-gate-pcie_x11", 12) CCU_GATE(CP110_GATE_PCIE_X4, "cp110-gate-pcie_x4", 13) CCU_GATE(CP110_GATE_PCIE_XOR, "cp110-gate-pcie_xor", 14) CCU_GATE(CP110_GATE_SATA, "cp110-gate-sata", 15) CCU_GATE(CP110_GATE_SATA_USB, "cp110-gate-sata_usb", 16) CCU_GATE(CP110_GATE_MAIN, "cp110-gate-main", 17) 
CCU_GATE(CP110_GATE_SDMMC_GOP, "cp110-gate-sdmmc_gop", 18)
CCU_GATE(CP110_GATE_SLOW_IO, "cp110-gate-slow_io", 21)
CCU_GATE(CP110_GATE_USB3H0, "cp110-gate-usb3h0", 22)
CCU_GATE(CP110_GATE_USB3H1, "cp110-gate-usb3h1", 23)
CCU_GATE(CP110_GATE_USB3DEV, "cp110-gate-usb3dev", 24)
CCU_GATE(CP110_GATE_EIP150, "cp110-gate-eip150", 25)
CCU_GATE(CP110_GATE_EIP197, "cp110-gate-eip197", 26)
};

struct mv_cp110_clock_softc {
	device_t	dev;
	struct syscon	*syscon;	/* register access goes through syscon */
	struct mtx	mtx;		/* serializes clkdev register access */
};

static struct ofw_compat_data compat_data[] = {
	{"marvell,cp110-clock", 1},
	{NULL, 0}
};

/* Gate registers live behind the CP110 system-controller syscon. */
#define	RD4(sc, reg)		SYSCON_READ_4((sc)->syscon, (reg))
#define	WR4(sc, reg, val)	SYSCON_WRITE_4((sc)->syscon, (reg), (val))

/*
 * Build a per-unit unique clock name ("<name>-<unit>").  Panics on
 * allocation failure; the returned string is owned by the clock node.
 */
static char *
mv_cp110_clock_name(device_t dev, const char *name)
{
	char *clkname = NULL;
	int unit;

	unit = device_get_unit(dev);
	if (asprintf(&clkname, M_DEVBUF, "%s-%d", name, unit) <= 0)
		panic("Cannot generate unique clock name for %s\n", name);
	return (clkname);
}

static int
mv_cp110_clock_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Marvell CP110 Clock Controller");
	return (BUS_PROBE_DEFAULT);
}

/*
 * OFW "clocks" specifier mapper.  cells[0] selects the clock bank
 * (1 == gates, offset by CP110_MAX_CLOCK), cells[1] is the index.
 * NOTE(review): clknode_find_by_id() may return NULL for an unknown id,
 * yet this still returns 0 — confirm callers tolerate *clk == NULL.
 */
static int
cp110_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells,
    struct clknode **clk)
{
	int id = 0;

	if (ncells != 2)
		return (ENXIO);

	id = cells[1];
	if (cells[0] == 1)
		id += CP110_MAX_CLOCK;

	*clk = clknode_find_by_id(clkdom, id);
	return (0);
}

/*
 * Attach: create the clock domain, register the fixed core clocks
 * (pll0, ppv2, x2core, core, sdio) and one gate node per entry in
 * cp110_gates, with parents chosen per gate function.
 */
static int
mv_cp110_clock_attach(device_t dev)
{
	struct mv_cp110_clock_softc *sc;
	struct clkdom *clkdom;
	struct clk_gate_def def;
	char *pll0_name;
	int unit, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (SYSCON_GET_HANDLE(sc->dev, &sc->syscon) != 0 ||
	    sc->syscon == NULL) {
		device_printf(dev, "cannot get syscon for device\n");
		return (ENXIO);
	}

	/* Only two CP110 units exist (parent names differ per unit). */
	unit = device_get_unit(dev);
	if (unit > 1) {
		device_printf(dev, "Bogus cp110-system-controller unit %d\n",
		    unit);
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	clkdom = clkdom_create(dev);
	clkdom_set_ofw_mapper(clkdom, cp110_ofw_map);

	pll0_name = mv_cp110_clock_name(dev, "cp110-pll0");
	cp110_clk_pll_0.clkdef.name = pll0_name;
	clknode_fixed_register(clkdom, &cp110_clk_pll_0);

	cp110_clk_ppv2_core.clkdef.name = mv_cp110_clock_name(dev,
	    "cp110-ppv2");
	cp110_clk_ppv2_core.clkdef.parent_names = (unit == 0) ?
	    clk_parents_0 : clk_parents_1;
	clknode_fixed_register(clkdom, &cp110_clk_ppv2_core);

	cp110_clk_x2core.clkdef.name = mv_cp110_clock_name(dev,
	    "cp110-x2core");
	cp110_clk_x2core.clkdef.parent_names = (unit == 0) ?
	    clk_parents_0 : clk_parents_1;
	clknode_fixed_register(clkdom, &cp110_clk_x2core);

	cp110_clk_core.clkdef.name = mv_cp110_clock_name(dev,
	    "cp110-core");
	cp110_clk_core.clkdef.parent_names = (unit == 0) ?
	    core_parents_0 : core_parents_1;
	clknode_fixed_register(clkdom, &cp110_clk_core);

	/* NAND missing */

	cp110_clk_sdio.clkdef.name = mv_cp110_clock_name(dev,
	    "cp110-sdio");
	cp110_clk_sdio.clkdef.parent_names = (unit == 0) ?
	    clk_parents_0 : clk_parents_1;
	clknode_fixed_register(clkdom, &cp110_clk_sdio);

	for (i = 0; i < nitems(cp110_gates); i++) {
		if (cp110_gates[i].name == NULL)
			continue;

		memset(&def, 0, sizeof(def));
		def.clkdef.id = CP110_MAX_CLOCK + i;
		def.clkdef.name = mv_cp110_clock_name(dev,
		    cp110_gates[i].name);
		def.clkdef.parent_cnt = 1;
		def.offset = CP110_CLOCK_GATING_OFFSET;
		def.shift = cp110_gates[i].shift;
		def.mask = 1;
		def.on_value = 1;
		def.off_value = 0;

		/* Pick the parent matching the gated peripheral. */
		switch (i) {
		case CP110_GATE_MG:
		case CP110_GATE_GOP_DP:
		case CP110_GATE_PPV2:
			def.clkdef.parent_names =
			    &cp110_clk_ppv2_core.clkdef.name;
			break;
		case CP110_GATE_SDIO:
			def.clkdef.parent_names =
			    &cp110_clk_sdio.clkdef.name;
			break;
		case CP110_GATE_MAIN:
		case CP110_GATE_PCIE_XOR:
		case CP110_GATE_PCIE_X4:
		case CP110_GATE_EIP150:
		case CP110_GATE_EIP197:
			def.clkdef.parent_names =
			    &cp110_clk_x2core.clkdef.name;
			break;
		default:
			def.clkdef.parent_names =
			    &cp110_clk_core.clkdef.name;
			break;
		}

		clknode_gate_register(clkdom, &def);
	}

	clkdom_finit(clkdom);

	if (bootverbose)
		clkdom_dump(clkdom);

	return (0);
}

/* Clocks stay referenced by consumers; detach is refused. */
static int
mv_cp110_clock_detach(device_t dev)
{

	return (EBUSY);
}

/* clkdev interface: raw register access for child clock nodes. */
static int
mv_cp110_clock_write_4(device_t dev, bus_addr_t addr, uint32_t val)
{
	struct mv_cp110_clock_softc *sc;

	sc = device_get_softc(dev);
	WR4(sc, addr, val);
	return (0);
}

static int
mv_cp110_clock_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
{
	struct mv_cp110_clock_softc *sc;

	sc = device_get_softc(dev);
	*val = RD4(sc, addr);
	return (0);
}

static int
mv_cp110_clock_modify_4(device_t dev, bus_addr_t addr, uint32_t clr,
    uint32_t set)
{
	struct mv_cp110_clock_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	reg = RD4(sc, addr);
	reg &= ~clr;
	reg |= set;
	WR4(sc, addr, reg);
	return (0);
}

static void
mv_cp110_clock_device_lock(device_t dev)
{
	struct mv_cp110_clock_softc *sc;

	sc = device_get_softc(dev);
	mtx_lock(&sc->mtx);
}

static void
mv_cp110_clock_device_unlock(device_t dev)
{
	struct mv_cp110_clock_softc *sc;

	sc = device_get_softc(dev);
	mtx_unlock(&sc->mtx);
}

static device_method_t mv_cp110_clock_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mv_cp110_clock_probe),
	DEVMETHOD(device_attach,	mv_cp110_clock_attach),
	DEVMETHOD(device_detach,	mv_cp110_clock_detach),

	/* clkdev interface */
	DEVMETHOD(clkdev_write_4,	mv_cp110_clock_write_4),
	DEVMETHOD(clkdev_read_4,	mv_cp110_clock_read_4),
	DEVMETHOD(clkdev_modify_4,	mv_cp110_clock_modify_4),
	DEVMETHOD(clkdev_device_lock,	mv_cp110_clock_device_lock),
	DEVMETHOD(clkdev_device_unlock,	mv_cp110_clock_device_unlock),

	DEVMETHOD_END
};

static driver_t mv_cp110_clock_driver = {
	"mv_cp110_clock",
	mv_cp110_clock_methods,
	sizeof(struct mv_cp110_clock_softc),
};

EARLY_DRIVER_MODULE(mv_cp110_clock, simplebus, mv_cp110_clock_driver, 0, 0,
    BUS_PASS_RESOURCE + BUS_PASS_ORDER_LATE);
diff --git a/sys/arm/nvidia/drm2/tegra_bo.c b/sys/arm/nvidia/drm2/tegra_bo.c
index 1ffd65de9d36..c27b9f39c508 100644
--- a/sys/arm/nvidia/drm2/tegra_bo.c
+++ b/sys/arm/nvidia/drm2/tegra_bo.c
@@ -1,362 +1,362 @@
/*-
 * Copyright (c) 2015 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include static void tegra_bo_destruct(struct tegra_bo *bo) { vm_page_t m; size_t size; int i; if (bo->cdev_pager == NULL) return; size = round_page(bo->gem_obj.size); if (bo->vbase != 0) pmap_qremove(bo->vbase, bo->npages); VM_OBJECT_WLOCK(bo->cdev_pager); for (i = 0; i < bo->npages; i++) { m = bo->m[i]; vm_page_busy_acquire(m, 0); cdev_pager_free_page(bo->cdev_pager, m); m->flags &= ~PG_FICTITIOUS; vm_page_unwire_noq(m); vm_page_free(m); } VM_OBJECT_WUNLOCK(bo->cdev_pager); vm_object_deallocate(bo->cdev_pager); if (bo->vbase != 0) vmem_free(kmem_arena, bo->vbase, size); } static void tegra_bo_free_object(struct drm_gem_object *gem_obj) { struct tegra_bo *bo; bo = container_of(gem_obj, struct tegra_bo, gem_obj); drm_gem_free_mmap_offset(gem_obj); drm_gem_object_release(gem_obj); tegra_bo_destruct(bo); free(bo->m, DRM_MEM_DRIVER); free(bo, DRM_MEM_DRIVER); } static int tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr, vm_page_t **ret_page) { vm_page_t m; int err, i, tries; vm_paddr_t low, high, boundary; low = 0; high = -1UL; boundary = 0; tries = 0; retry: m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, low, high, alignment, boundary, memattr); if (m == NULL) { if (tries < 3) { err = vm_page_reclaim_contig(0, npages, low, high, alignment, boundary); if (err == ENOMEM) 
vm_wait(NULL); else if (err != 0) return (ENOMEM); tries++; goto retry; } return (ENOMEM); } for (i = 0; i < npages; i++, m++) { m->valid = VM_PAGE_BITS_ALL; (*ret_page)[i] = m; } return (0); } /* Initialize pager and insert all object pages to it*/ static int tegra_bo_init_pager(struct tegra_bo *bo) { vm_page_t m; size_t size; int i; size = round_page(bo->gem_obj.size); bo->pbase = VM_PAGE_TO_PHYS(bo->m[0]); if (vmem_alloc(kmem_arena, size, M_WAITOK | M_BESTFIT, &bo->vbase)) return (ENOMEM); VM_OBJECT_WLOCK(bo->cdev_pager); for (i = 0; i < bo->npages; i++) { m = bo->m[i]; /* * XXX This is a temporary hack. * We need pager suitable for paging (mmap) managed * real (non-fictitious) pages. * - managed pages are needed for clean module unload. * - aliasing fictitious page to real one is bad, * pmap cannot handle this situation without issues * It expects that * paddr = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(paddr)) * for every single page passed to pmap. */ m->oflags &= ~VPO_UNMANAGED; m->flags |= PG_FICTITIOUS; if (vm_page_insert(m, bo->cdev_pager, i) != 0) return (EINVAL); } VM_OBJECT_WUNLOCK(bo->cdev_pager); pmap_qenter(bo->vbase, bo->m, bo->npages); return (0); } /* Allocate memory for frame buffer */ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) { size_t size; int rv; size = bo->gem_obj.size; bo->npages = atop(size); bo->m = malloc(sizeof(vm_page_t *) * bo->npages, DRM_MEM_DRIVER, M_WAITOK | M_ZERO); rv = tegra_bo_alloc_contig(bo->npages, PAGE_SIZE, VM_MEMATTR_WRITE_COMBINING, &(bo->m)); if (rv != 0) { DRM_WARNING("Cannot allocate memory for gem object.\n"); return (rv); } rv = tegra_bo_init_pager(bo); if (rv != 0) { DRM_WARNING("Cannot initialize gem object pager.\n"); return (rv); } return (0); } int tegra_bo_create(struct drm_device *drm, size_t size, struct tegra_bo **res_bo) { struct tegra_bo *bo; int rv; if (size <= 0) return (-EINVAL); bo = malloc(sizeof(*bo), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); size = round_page(size); rv = 
drm_gem_object_init(drm, &bo->gem_obj, size); if (rv != 0) { free(bo, DRM_MEM_DRIVER); return (rv); } rv = drm_gem_create_mmap_offset(&bo->gem_obj); if (rv != 0) { drm_gem_object_release(&bo->gem_obj); free(bo, DRM_MEM_DRIVER); return (rv); } bo->cdev_pager = cdev_pager_allocate(&bo->gem_obj, OBJT_MGTDEVICE, drm->driver->gem_pager_ops, size, 0, 0, NULL); rv = tegra_bo_alloc(drm, bo); if (rv != 0) { tegra_bo_free_object(&bo->gem_obj); return (rv); } *res_bo = bo; return (0); } static int tegra_bo_create_with_handle(struct drm_file *file, struct drm_device *drm, size_t size, uint32_t *handle, struct tegra_bo **res_bo) { int rv; struct tegra_bo *bo; rv = tegra_bo_create(drm, size, &bo); if (rv != 0) return (rv); rv = drm_gem_handle_create(file, &bo->gem_obj, handle); if (rv != 0) { tegra_bo_free_object(&bo->gem_obj); drm_gem_object_release(&bo->gem_obj); return (rv); } drm_gem_object_unreference_unlocked(&bo->gem_obj); *res_bo = bo; return (0); } static int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm_dev, struct drm_mode_create_dumb *args) { struct tegra_drm *drm; struct tegra_bo *bo; int rv; drm = container_of(drm_dev, struct tegra_drm, drm_dev); args->pitch= (args->width * args->bpp + 7) / 8; args->pitch = roundup(args->pitch, drm->pitch_align); args->size = args->pitch * args->height; rv = tegra_bo_create_with_handle(file, drm_dev, args->size, &args->handle, &bo); return (rv); } static int tegra_bo_dumb_map_offset(struct drm_file *file_priv, struct drm_device *drm_dev, uint32_t handle, uint64_t *offset) { struct drm_gem_object *gem_obj; int rv; DRM_LOCK(drm_dev); gem_obj = drm_gem_object_lookup(drm_dev, file_priv, handle); if (gem_obj == NULL) { device_printf(drm_dev->dev, "Object not found\n"); DRM_UNLOCK(drm_dev); return (-EINVAL); } rv = drm_gem_create_mmap_offset(gem_obj); if (rv != 0) goto fail; *offset = DRM_GEM_MAPPING_OFF(gem_obj->map_list.key) | DRM_GEM_MAPPING_KEY; drm_gem_object_unreference(gem_obj); DRM_UNLOCK(drm_dev); return 
(0); fail: drm_gem_object_unreference(gem_obj); DRM_UNLOCK(drm_dev); return (rv); } static int tegra_bo_dumb_destroy(struct drm_file *file_priv, struct drm_device *drm_dev, unsigned int handle) { int rv; rv = drm_gem_handle_delete(file_priv, handle); return (rv); } /* * mmap support */ static int tegra_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { #ifdef DRM_PAGER_DEBUG DRM_DEBUG("object %p offset %jd prot %d mres %p\n", vm_obj, (intmax_t)offset, prot, mres); #endif return (VM_PAGER_FAIL); } static int tegra_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { if (color != NULL) *color = 0; return (0); } static void tegra_gem_pager_dtor(void *handle) { } static struct cdev_pager_ops tegra_gem_pager_ops = { .cdev_pg_fault = tegra_gem_pager_fault, .cdev_pg_ctor = tegra_gem_pager_ctor, .cdev_pg_dtor = tegra_gem_pager_dtor }; /* Fill up relevant fields in drm_driver ops */ void tegra_bo_driver_register(struct drm_driver *drm_drv) { drm_drv->gem_free_object = tegra_bo_free_object; drm_drv->gem_pager_ops = &tegra_gem_pager_ops; drm_drv->dumb_create = tegra_bo_dumb_create; drm_drv->dumb_map_offset = tegra_bo_dumb_map_offset; drm_drv->dumb_destroy = tegra_bo_dumb_destroy; } diff --git a/sys/arm/nvidia/drm2/tegra_dc.c b/sys/arm/nvidia/drm2/tegra_dc.c index 81177180056c..637ea3981acc 100644 --- a/sys/arm/nvidia/drm2/tegra_dc.c +++ b/sys/arm/nvidia/drm2/tegra_dc.c @@ -1,1438 +1,1438 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* NOTE(review): header names were lost in extraction of this diff. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include
+#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "tegra_drm_if.h"
#include "tegra_dc_if.h"

/* DC registers are word-indexed; multiply by 4 for the byte offset. */
#define	WR4(_sc, _r, _v)	bus_write_4((_sc)->mem_res, 4 * (_r), (_v))
#define	RD4(_sc, _r)		bus_read_4((_sc)->mem_res, 4 * (_r))

#define	LOCK(_sc)		mtx_lock(&(_sc)->mtx)
#define	UNLOCK(_sc)		mtx_unlock(&(_sc)->mtx)
#define	SLEEP(_sc, timeout)						\
	mtx_sleep(sc, &sc->mtx, 0, "tegra_dc_wait", timeout);
#define	LOCK_INIT(_sc)							\
	mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_dc", MTX_DEF)
#define	LOCK_DESTROY(_sc)	mtx_destroy(&_sc->mtx)
#define	ASSERT_LOCKED(_sc)	mtx_assert(&_sc->mtx, MA_OWNED)
#define	ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->mtx, MA_NOTOWNED)

/* host1x syncpoint numbers used for the two display heads' vblank. */
#define	SYNCPT_VBLANK0 26
#define	SYNCPT_VBLANK1 27

#define	DC_MAX_PLANES 2		/* Maximum planes */

/* DRM Formats supported by DC */
/* XXXX expand me */
static uint32_t dc_plane_formats[] = {
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};

/* Complete description of one window (plane) */
struct dc_window {
	/* Source (in framebuffer) rectangle, in pixels */
	u_int			src_x;
	u_int			src_y;
	u_int			src_w;
	u_int			src_h;

	/* Destination (on display) rectangle, in pixels */
	u_int			dst_x;
	u_int			dst_y;
	u_int			dst_w;
	u_int			dst_h;

	/* Parsed pixel format */
	u_int			bits_per_pixel;
	bool			is_yuv;		/* any YUV mode */
	bool			is_yuv_planar;	/* planar YUV mode */
	uint32_t		color_mode;	/* DC_WIN_COLOR_DEPTH */
	uint32_t		swap;		/* DC_WIN_BYTE_SWAP */
	uint32_t		surface_kind;	/* DC_WINBUF_SURFACE_KIND */
	uint32_t		block_height;	/* DC_WINBUF_SURFACE_KIND */

	/* Parsed flipping, rotation is not supported for pitched modes */
	bool			flip_x;		/* inverted X-axis */
	bool			flip_y;		/* inverted Y-axis */
	bool			transpose_xy;	/* swap X and Y-axis */

	/* Color planes base addresses and strides */
	bus_size_t		base[3];
	uint32_t		stride[3];	/* stride[2] isn't used by HW */
};

struct dc_softc {
	device_t		dev;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*irq_ih;
	struct mtx		mtx;		/* protects DC register access */

	clk_t			clk_parent;
	clk_t			clk_dc;
	hwreset_t		hwreset_dc;

	int			pitch_align;

	struct tegra_crtc	tegra_crtc;
	/* Pending page-flip completion event, protected by drm event_lock. */
	struct drm_pending_vblank_event *event;
	struct drm_gem_object	*cursor_gem;
};

static struct ofw_compat_data compat_data[] = {
	{"nvidia,tegra124-dc",	1},
	{NULL,			0},
};

/* Convert standard drm pixel format to tegra windows parameters.
*/ static int dc_parse_drm_format(struct tegra_fb *fb, struct dc_window *win) { struct tegra_bo *bo; uint32_t cm; uint32_t sw; bool is_yuv, is_yuv_planar; int nplanes, i; switch (fb->drm_fb.pixel_format) { case DRM_FORMAT_XBGR8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_R8G8B8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_XRGB8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B8G8R8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_RGB565: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B5G6R5; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_UYVY: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUYV: sw = BYTE_SWAP(SWAP2); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUV420: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr420P; is_yuv = true; is_yuv_planar = true; break; case DRM_FORMAT_YUV422: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422P; is_yuv = true; is_yuv_planar = true; break; default: /* Unsupported format */ return (-EINVAL); } /* Basic check of arguments. */ switch (fb->rotation) { case 0: case 180: break; case 90: /* Rotation is supported only */ case 270: /* for block linear surfaces */ if (!fb->block_linear) return (-EINVAL); break; default: return (-EINVAL); } /* XXX Add more checks (sizes, scaling...) */ if (win == NULL) return (0); win->surface_kind = fb->block_linear ? 
SURFACE_KIND_BL_16B2: SURFACE_KIND_PITCH; win->block_height = fb->block_height; switch (fb->rotation) { case 0: /* (0,0,0) */ win->transpose_xy = false; win->flip_x = false; win->flip_y = false; break; case 90: /* (1,0,1) */ win->transpose_xy = true; win->flip_x = false; win->flip_y = true; break; case 180: /* (0,1,1) */ win->transpose_xy = false; win->flip_x = true; win->flip_y = true; break; case 270: /* (1,1,0) */ win->transpose_xy = true; win->flip_x = true; win->flip_y = false; break; } win->flip_x ^= fb->flip_x; win->flip_y ^= fb->flip_y; win->color_mode = cm; win->swap = sw; win->bits_per_pixel = fb->drm_fb.bits_per_pixel; win->is_yuv = is_yuv; win->is_yuv_planar = is_yuv_planar; nplanes = drm_format_num_planes(fb->drm_fb.pixel_format); for (i = 0; i < nplanes; i++) { bo = fb->planes[i]; win->base[i] = bo->pbase + fb->drm_fb.offsets[i]; win->stride[i] = fb->drm_fb.pitches[i]; } return (0); } /* * Scaling functions. * * It's unclear if we want/must program the fractional portion * (aka bias) of init_dda registers, mainly when mirrored axis * modes are used. * For now, we use 1.0 as recommended by TRM. */ static inline uint32_t dc_scaling_init(uint32_t start) { return (1 << 12); } static inline uint32_t dc_scaling_incr(uint32_t src, uint32_t dst, uint32_t maxscale) { uint32_t val; val = (src - 1) << 12 ; /* 4.12 fixed float */ val /= (dst - 1); if (val > (maxscale << 12)) val = maxscale << 12; return val; } /* ------------------------------------------------------------------- * * HW Access. * */ /* * Setup pixel clock. * Minimal frequency is pixel clock, but output is free to select * any higher. 
*/ static int dc_setup_clk(struct dc_softc *sc, struct drm_crtc *crtc, struct drm_display_mode *mode, uint32_t *div) { uint64_t pclk, freq; struct tegra_drm_encoder *output; struct drm_encoder *encoder; long rv; pclk = mode->clock * 1000; /* Find attached encoder */ output = NULL; list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { output = container_of(encoder, struct tegra_drm_encoder, encoder); break; } } if (output == NULL) return (-ENODEV); if (output->setup_clock == NULL) panic("Output have not setup_clock function.\n"); rv = output->setup_clock(output, sc->clk_dc, pclk); if (rv != 0) { device_printf(sc->dev, "Cannot setup pixel clock: %llu\n", pclk); return (rv); } rv = clk_get_freq(sc->clk_dc, &freq); *div = (freq * 2 / pclk) - 2; DRM_DEBUG_KMS("frequency: %llu, DC divider: %u\n", freq, *div); return 0; } static void dc_setup_window(struct dc_softc *sc, unsigned int index, struct dc_window *win) { uint32_t h_offset, v_offset, h_size, v_size, bpp; uint32_t h_init_dda, v_init_dda, h_incr_dda, v_incr_dda; uint32_t val; #ifdef DMR_DEBUG_WINDOW printf("%s window: %d\n", __func__, index); printf(" src: x: %d, y: %d, w: %d, h: %d\n", win->src_x, win->src_y, win->src_w, win->src_h); printf(" dst: x: %d, y: %d, w: %d, h: %d\n", win->dst_x, win->dst_y, win->dst_w, win->dst_h); printf(" bpp: %d, color_mode: %d, swap: %d\n", win->bits_per_pixel, win->color_mode, win->swap); #endif if (win->is_yuv) bpp = win->is_yuv_planar ? 
1 : 2; else bpp = (win->bits_per_pixel + 7) / 8; if (!win->transpose_xy) { h_size = win->src_w * bpp; v_size = win->src_h; } else { h_size = win->src_h * bpp; v_size = win->src_w; } h_offset = win->src_x * bpp; v_offset = win->src_y; if (win->flip_x) { h_offset += win->src_w * bpp - 1; } if (win->flip_y) v_offset += win->src_h - 1; /* Adjust offsets for planar yuv modes */ if (win->is_yuv_planar) { h_offset &= ~1; if (win->flip_x ) h_offset |= 1; v_offset &= ~1; if (win->flip_y ) v_offset |= 1; } /* Setup scaling. */ if (!win->transpose_xy) { h_init_dda = dc_scaling_init(win->src_x); v_init_dda = dc_scaling_init(win->src_y); h_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 4); v_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 15); } else { h_init_dda = dc_scaling_init(win->src_y); v_init_dda = dc_scaling_init(win->src_x); h_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 4); v_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 15); } #ifdef DMR_DEBUG_WINDOW printf("\n"); printf(" bpp: %d, size: h: %d v: %d, offset: h:%d v: %d\n", bpp, h_size, v_size, h_offset, v_offset); printf(" init_dda: h: %d v: %d, incr_dda: h: %d v: %d\n", h_init_dda, v_init_dda, h_incr_dda, v_incr_dda); #endif LOCK(sc); /* Select target window */ val = WINDOW_A_SELECT << index; WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, val); /* Sizes */ WR4(sc, DC_WIN_POSITION, WIN_POSITION(win->dst_x, win->dst_y)); WR4(sc, DC_WIN_SIZE, WIN_SIZE(win->dst_w, win->dst_h)); WR4(sc, DC_WIN_PRESCALED_SIZE, WIN_PRESCALED_SIZE(h_size, v_size)); /* DDA */ WR4(sc, DC_WIN_DDA_INCREMENT, WIN_DDA_INCREMENT(h_incr_dda, v_incr_dda)); WR4(sc, DC_WIN_H_INITIAL_DDA, h_init_dda); WR4(sc, DC_WIN_V_INITIAL_DDA, v_init_dda); /* Color planes base addresses and strides */ WR4(sc, DC_WINBUF_START_ADDR, win->base[0]); if (win->is_yuv_planar) { WR4(sc, DC_WINBUF_START_ADDR_U, win->base[1]); WR4(sc, DC_WINBUF_START_ADDR_V, win->base[2]); WR4(sc, DC_WIN_LINE_STRIDE, win->stride[1] << 16 | win->stride[0]); } else { WR4(sc, 
DC_WIN_LINE_STRIDE, win->stride[0]); } /* Offsets for rotation and axis flip */ WR4(sc, DC_WINBUF_ADDR_H_OFFSET, h_offset); WR4(sc, DC_WINBUF_ADDR_V_OFFSET, v_offset); /* Color format */ WR4(sc, DC_WIN_COLOR_DEPTH, win->color_mode); WR4(sc, DC_WIN_BYTE_SWAP, win->swap); /* Tiling */ val = win->surface_kind; if (win->surface_kind == SURFACE_KIND_BL_16B2) val |= SURFACE_KIND_BLOCK_HEIGHT(win->block_height); WR4(sc, DC_WINBUF_SURFACE_KIND, val); /* Color space coefs for YUV modes */ if (win->is_yuv) { WR4(sc, DC_WINC_CSC_YOF, 0x00f0); WR4(sc, DC_WINC_CSC_KYRGB, 0x012a); WR4(sc, DC_WINC_CSC_KUR, 0x0000); WR4(sc, DC_WINC_CSC_KVR, 0x0198); WR4(sc, DC_WINC_CSC_KUG, 0x039b); WR4(sc, DC_WINC_CSC_KVG, 0x032f); WR4(sc, DC_WINC_CSC_KUB, 0x0204); WR4(sc, DC_WINC_CSC_KVB, 0x0000); } val = WIN_ENABLE; if (win->is_yuv) val |= CSC_ENABLE; else if (win->bits_per_pixel < 24) val |= COLOR_EXPAND; if (win->flip_y) val |= V_DIRECTION; if (win->flip_x) val |= H_DIRECTION; if (win->transpose_xy) val |= SCAN_COLUMN; WR4(sc, DC_WINC_WIN_OPTIONS, val); #ifdef DMR_DEBUG_WINDOW /* Set underflow debug mode -> highlight missing pixels. */ WR4(sc, DC_WINBUF_UFLOW_CTRL, UFLOW_CTR_ENABLE); WR4(sc, DC_WINBUF_UFLOW_DBG_PIXEL, 0xFFFF0000); #endif UNLOCK(sc); } /* ------------------------------------------------------------------- * * Plane functions. 
 *
 */

/*
 * DRM plane update: convert the 16.16 fixed-point source rectangle and
 * the framebuffer format into a dc_window, program it and latch the
 * window update.
 */
static int
dc_plane_update(struct drm_plane *drm_plane, struct drm_crtc *drm_crtc,
    struct drm_framebuffer *drm_fb,
    int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
    uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
{
	struct tegra_plane *plane;
	struct tegra_crtc *crtc;
	struct tegra_fb *fb;
	struct dc_softc *sc;
	struct dc_window win;
	int rv;

	plane = container_of(drm_plane, struct tegra_plane, drm_plane);
	fb = container_of(drm_fb, struct tegra_fb, drm_fb);
	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	memset(&win, 0, sizeof(win));
	/* Source coordinates arrive in 16.16 fixed point. */
	win.src_x = src_x >> 16;
	win.src_y = src_y >> 16;
	win.src_w = src_w >> 16;
	win.src_h = src_h >> 16;
	win.dst_x = crtc_x;
	win.dst_y = crtc_y;
	win.dst_w = crtc_w;
	win.dst_h = crtc_h;

	rv = dc_parse_drm_format(fb, &win);
	if (rv != 0) {
		DRM_WARNING("unsupported pixel format %d\n",
		    fb->drm_fb.pixel_format);
		return (rv);
	}

	dc_setup_window(sc, plane->index, &win);

	/* Latch and request activation of this window's shadow state. */
	WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << plane->index);
	WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << plane->index);

	return (0);
}

/* Disable the window backing this plane (clears WIN_ENABLE). */
static int
dc_plane_disable(struct drm_plane *drm_plane)
{
	struct tegra_plane *plane;
	struct tegra_crtc *crtc;
	struct dc_softc *sc;
	uint32_t val, idx;

	if (drm_plane->crtc == NULL)
		return (0);
	plane = container_of(drm_plane, struct tegra_plane, drm_plane);
	crtc = container_of(drm_plane->crtc, struct tegra_crtc, drm_crtc);

	sc = device_get_softc(crtc->dev);
	idx = plane->index;

	LOCK(sc);
	WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT << idx);
	val = RD4(sc, DC_WINC_WIN_OPTIONS);
	val &= ~WIN_ENABLE;
	WR4(sc, DC_WINC_WIN_OPTIONS, val);
	UNLOCK(sc);

	WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << idx);
	WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << idx);

	return (0);
}

static void
dc_plane_destroy(struct drm_plane *plane)
{

	dc_plane_disable(plane);
	drm_plane_cleanup(plane);
	free(plane, DRM_MEM_KMS);
}

static const struct drm_plane_funcs dc_plane_funcs = {
	.update_plane = dc_plane_update,
	.disable_plane = dc_plane_disable,
	.destroy = dc_plane_destroy,
};

/* -------------------------------------------------------------------
 *
 * CRTC helper functions.
 *
 */
static void
dc_crtc_dpms(struct drm_crtc *crtc, int mode)
{

	/* Empty function */
}

static bool
dc_crtc_mode_fixup(struct drm_crtc *crtc,
    const struct drm_display_mode *mode,
    struct drm_display_mode *adjusted)
{

	return (true);
}

/*
 * Program window 0 to scan out fb starting at pixel (x, y); used for
 * both mode-set-base and page flips.
 */
static int
dc_set_base(struct dc_softc *sc, int x, int y, struct tegra_fb *fb)
{
	struct dc_window win;
	int rv;

	memset(&win, 0, sizeof(win));
	win.src_x = x;
	win.src_y = y;
	win.src_w = fb->drm_fb.width;
	win.src_h = fb->drm_fb.height;
	win.dst_x = x;
	win.dst_y = y;
	win.dst_w = fb->drm_fb.width;
	win.dst_h = fb->drm_fb.height;

	rv = dc_parse_drm_format(fb, &win);
	if (rv != 0) {
		DRM_WARNING("unsupported pixel format %d\n",
		    fb->drm_fb.pixel_format);
		return (rv);
	}

	dc_setup_window(sc, 0, &win);

	return (0);
}

/*
 * Full mode set: program the pixel clock and divider, the raster
 * timing (sync width, porches, active area) and window 0 for the
 * primary framebuffer.
 */
static int
dc_crtc_mode_set(struct drm_crtc *drm_crtc, struct drm_display_mode *mode,
    struct drm_display_mode *adjusted, int x, int y,
    struct drm_framebuffer *old_fb)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	struct tegra_fb *fb;
	struct dc_window win;
	uint32_t div, h_ref_to_sync, v_ref_to_sync;
	int rv;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);
	fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb);

	h_ref_to_sync = 1;
	v_ref_to_sync = 1;

	/* Setup timing */
	rv = dc_setup_clk(sc, drm_crtc, mode, &div);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot set pixel clock\n");
		return (rv);
	}

	/* Timing */
	WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, 0);

	WR4(sc, DC_DISP_REF_TO_SYNC,
	    (v_ref_to_sync << 16) | h_ref_to_sync);

	WR4(sc, DC_DISP_SYNC_WIDTH,
	    ((mode->vsync_end - mode->vsync_start) << 16) |
	    ((mode->hsync_end - mode->hsync_start) << 0));

	WR4(sc, DC_DISP_BACK_PORCH,
	    ((mode->vtotal - mode->vsync_end) << 16) |
	    ((mode->htotal - mode->hsync_end) << 0));

	WR4(sc, DC_DISP_FRONT_PORCH,
	    ((mode->vsync_start - mode->vdisplay) << 16) |
	    ((mode->hsync_start - mode->hdisplay) << 0));

	WR4(sc, DC_DISP_DISP_ACTIVE,
	    (mode->vdisplay << 16) | mode->hdisplay);

	WR4(sc, DC_DISP_DISP_INTERFACE_CONTROL, DISP_DATA_FORMAT(DF1P1C));

	WR4(sc,DC_DISP_DISP_CLOCK_CONTROL,
	    SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER(PCD1));

	memset(&win, 0, sizeof(win));
	win.src_x = x;
	win.src_y = y;
	win.src_w = mode->hdisplay;
	win.src_h = mode->vdisplay;
	win.dst_x = x;
	win.dst_y = y;
	win.dst_w = mode->hdisplay;
	win.dst_h = mode->vdisplay;

	rv = dc_parse_drm_format(fb, &win);
	if (rv != 0) {
		DRM_WARNING("unsupported pixel format %d\n",
		    drm_crtc->fb->pixel_format);
		return (rv);
	}

	dc_setup_window(sc, 0, &win);

	return (0);
}

static int
dc_crtc_mode_set_base(struct drm_crtc *drm_crtc, int x, int y,
    struct drm_framebuffer *old_fb)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	struct tegra_fb *fb;
	int rv;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb);
	sc = device_get_softc(crtc->dev);

	rv = dc_set_base(sc, x, y, fb);

	/* Commit */
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE);
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ);

	return (rv);
}

/*
 * Power up the head: route the vblank syncpoint, enable the power
 * partitions, switch the controller to continuous display mode and
 * unmask/enable the under/overflow and vblank interrupts.
 */
static void
dc_crtc_prepare(struct drm_crtc *drm_crtc)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	uint32_t val;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	WR4(sc, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL, SYNCPT_CNTRL_NO_STALL);
	/* XXX allocate syncpoint from host1x */
	WR4(sc, DC_CMD_CONT_SYNCPT_VSYNC, SYNCPT_VSYNC_ENABLE |
	    (sc->tegra_crtc.nvidia_head == 0 ?
	    SYNCPT_VBLANK0 : SYNCPT_VBLANK1));

	WR4(sc, DC_CMD_DISPLAY_POWER_CONTROL,
	    PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
	    PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);

	val = RD4(sc, DC_CMD_DISPLAY_COMMAND);
	val |= DISPLAY_CTRL_MODE(CTRL_MODE_C_DISPLAY);
	WR4(sc, DC_CMD_DISPLAY_COMMAND, val);

	WR4(sc, DC_CMD_INT_MASK,
	    WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
	    WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT);

	WR4(sc, DC_CMD_INT_ENABLE,
	    VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
	    WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT);
}

/* Latch pending state and enable the frame-end interrupt. */
static void
dc_crtc_commit(struct drm_crtc *drm_crtc)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	uint32_t val;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE);

	val = RD4(sc, DC_CMD_INT_MASK);
	val |= FRAME_END_INT;
	WR4(sc, DC_CMD_INT_MASK, val);

	val = RD4(sc, DC_CMD_INT_ENABLE);
	val |= FRAME_END_INT;
	WR4(sc, DC_CMD_INT_ENABLE, val);

	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ);
}

static void
dc_crtc_load_lut(struct drm_crtc *crtc)
{

	/* empty function */
}

static const struct drm_crtc_helper_funcs dc_crtc_helper_funcs = {
	.dpms = dc_crtc_dpms,
	.mode_fixup = dc_crtc_mode_fixup,
	.mode_set = dc_crtc_mode_set,
	.mode_set_base = dc_crtc_mode_set_base,
	.prepare = dc_crtc_prepare,
	.commit = dc_crtc_commit,
	.load_lut = dc_crtc_load_lut,
};

/*
 * Return the position of crtc in the device's crtc_list; panics if the
 * crtc is not on the list.
 */
static int
drm_crtc_index(struct drm_crtc *crtc)
{
	int idx;
	struct drm_crtc *tmp;

	idx = 0;
	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list,
	    head) {
		if (tmp == crtc)
			return (idx);
		idx++;
	}
	panic("Cannot find CRTC");
}

/* -------------------------------------------------------------------
 *
 * Exported functions (mainly vsync related).
 *
 * XXX revisit this -> convert to bus methods?
 */

/* Return the hardware head index backing this CRTC. */
int
tegra_dc_get_pipe(struct drm_crtc *drm_crtc)
{
	struct tegra_crtc *crtc;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);

	return (crtc->nvidia_head);
}

/* Unmask the vblank interrupt for this head. */
void
tegra_dc_enable_vblank(struct drm_crtc *drm_crtc)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	uint32_t val;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	LOCK(sc);
	val = RD4(sc, DC_CMD_INT_MASK);
	val |= VBLANK_INT;
	WR4(sc, DC_CMD_INT_MASK, val);
	UNLOCK(sc);
}

/* Mask the vblank interrupt for this head. */
void
tegra_dc_disable_vblank(struct drm_crtc *drm_crtc)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	uint32_t val;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	LOCK(sc);
	val = RD4(sc, DC_CMD_INT_MASK);
	val &= ~VBLANK_INT;
	WR4(sc, DC_CMD_INT_MASK, val);
	UNLOCK(sc);
}

/*
 * Complete a pending page flip once the hardware's active scan-out
 * address matches the new framebuffer; delivers the queued vblank
 * event under the DRM event lock.
 */
static void
dc_finish_page_flip(struct dc_softc *sc)
{
	struct drm_crtc *drm_crtc;
	struct drm_device *drm;
	struct tegra_fb *fb;
	struct tegra_bo *bo;
	uint32_t base;
	int idx;

	drm_crtc = &sc->tegra_crtc.drm_crtc;
	drm = drm_crtc->dev;
	fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb);

	mtx_lock(&drm->event_lock);

	if (sc->event == NULL) {
		mtx_unlock(&drm->event_lock);
		return;
	}

	LOCK(sc);
	/* Read active copy of WINBUF_START_ADDR */
	WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT);
	WR4(sc, DC_CMD_STATE_ACCESS, READ_MUX);
	base = RD4(sc, DC_WINBUF_START_ADDR);
	WR4(sc, DC_CMD_STATE_ACCESS, 0);
	UNLOCK(sc);

	/* Is already active */
	bo = tegra_fb_get_plane(fb, 0);
	if (base == (bo->pbase + fb->drm_fb.offsets[0])) {
		idx = drm_crtc_index(drm_crtc);
		drm_send_vblank_event(drm, idx, sc->event);
		drm_vblank_put(drm, idx);
		sc->event = NULL;
	}

	mtx_unlock(&drm->event_lock);
}

/* Drop a pending flip event owned by a closing DRM file. */
void
tegra_dc_cancel_page_flip(struct drm_crtc *drm_crtc, struct drm_file *file)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	struct drm_device *drm;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);
	drm = drm_crtc->dev;
	mtx_lock(&drm->event_lock);

	if ((sc->event != NULL) && (sc->event->base.file_priv == file)) {
		sc->event->base.destroy(&sc->event->base);
		drm_vblank_put(drm, drm_crtc_index(drm_crtc));
		sc->event = NULL;
	}
	mtx_unlock(&drm->event_lock);
}

/* -------------------------------------------------------------------
 *
 * CRTC functions.
 *
 */
/*
 * Queue a page flip; completion is signalled from dc_finish_page_flip
 * once the new base address is scanned out.
 * NOTE(review): 'fb' is derived from the outgoing drm_crtc->fb rather
 * than the incoming drm_fb, so dc_set_base() appears to program the
 * old framebuffer — confirm against a working tree before relying on
 * this.
 */
static int
dc_page_flip(struct drm_crtc *drm_crtc, struct drm_framebuffer *drm_fb,
    struct drm_pending_vblank_event *event)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	struct tegra_fb *fb;
	struct drm_device *drm;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);
	fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb);
	drm = drm_crtc->dev;

	if (sc->event != NULL)
		return (-EBUSY);

	if (event != NULL) {
		event->pipe = sc->tegra_crtc.nvidia_head;
		sc->event = event;
		drm_vblank_get(drm, event->pipe);
	}

	dc_set_base(sc, drm_crtc->x, drm_crtc->y, fb);
	drm_crtc->fb = drm_fb;

	/* Commit */
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE);

	return (0);
}

/*
 * Set the hardware cursor from a GEM object; only square ARGB cursors
 * of 32/64/128/256 pixels are supported.  The image is copied into a
 * private cache and converted to RGBA on the way.
 */
static int
dc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file,
    uint32_t handle, uint32_t width, uint32_t height)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int i;
	uint32_t val, *src, *dst;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);

	if (width != height)
		return (-EINVAL);

	switch (width) {
	case 32:
		val = CURSOR_SIZE(C32x32);
		break;
	case 64:
		val = CURSOR_SIZE(C64x64);
		break;
	case 128:
		val = CURSOR_SIZE(C128x128);
		break;
	case 256:
		val = CURSOR_SIZE(C256x256);
		break;
	default:
		return (-EINVAL);
	}

	bo = NULL;
	gem = NULL;
	if (handle != 0) {
		gem = drm_gem_object_lookup(drm_crtc->dev, file, handle);
		if (gem == NULL)
			return (-ENOENT);
		bo = container_of(gem, struct tegra_bo, gem_obj);
	}

	if (sc->cursor_gem != NULL) {
		drm_gem_object_unreference(sc->cursor_gem);
	}
	sc->cursor_gem = gem;

	if (bo != NULL) {
		/*
		 * Copy cursor into cache and convert it from ARGB to RGBA.
		 * XXXX - this is broken by design - client can write to BO at
		 * any time. We can dedicate other window for cursor or switch
		 * to sw cursor in worst case.
		 */
		src = (uint32_t *)bo->vbase;
		dst = (uint32_t *)crtc->cursor_vbase;
		for (i = 0; i < width * height; i++)
			dst[i] = (src[i] << 8) | (src[i] >> 24);

		val |= CURSOR_CLIP(CC_DISPLAY);
		val |= CURSOR_START_ADDR(crtc->cursor_pbase);
		WR4(sc, DC_DISP_CURSOR_START_ADDR, val);

		val = RD4(sc, DC_DISP_BLEND_CURSOR_CONTROL);
		val &= ~CURSOR_DST_BLEND_FACTOR_SELECT(~0);
		val &= ~CURSOR_SRC_BLEND_FACTOR_SELECT(~0);
		val |= CURSOR_MODE_SELECT;
		val |= CURSOR_DST_BLEND_FACTOR_SELECT(DST_NEG_K1_TIMES_SRC);
		val |= CURSOR_SRC_BLEND_FACTOR_SELECT(SRC_BLEND_K1_TIMES_SRC);
		val |= CURSOR_ALPHA(~0);
		WR4(sc, DC_DISP_BLEND_CURSOR_CONTROL, val);

		val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS);
		val |= CURSOR_ENABLE;
		WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val);
	} else {
		val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS);
		val &= ~CURSOR_ENABLE;
		WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val);
	}

	/* XXX This fixes cursor underflow issues, but why 
 */
	WR4(sc, DC_DISP_CURSOR_UNDERFLOW_CTRL, CURSOR_UFLOW_CYA);

	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | CURSOR_UPDATE );
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | CURSOR_ACT_REQ);
	return (0);
}

/*
 * Move the hardware cursor to screen position (x, y) and latch the new
 * position into the active register set.
 */
static int
dc_cursor_move(struct drm_crtc *drm_crtc, int x, int y)
{
	struct dc_softc *sc;
	struct tegra_crtc *crtc;

	crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc);
	sc = device_get_softc(crtc->dev);
	WR4(sc, DC_DISP_CURSOR_POSITION, CURSOR_POSITION(x, y));

	/* Two-step commit: arm the update, then request activation. */
	WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_UPDATE);
	WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_ACT_REQ);

	return (0);
}

/*
 * CRTC destructor.  The structure is zeroed after cleanup to make any
 * stale use of the freed CRTC fail loudly.
 */
static void
dc_destroy(struct drm_crtc *crtc)
{

	drm_crtc_cleanup(crtc);
	memset(crtc, 0, sizeof(*crtc));
}

static const struct drm_crtc_funcs dc_crtc_funcs = {
	.page_flip = dc_page_flip,
	.cursor_set = dc_cursor_set,
	.cursor_move = dc_cursor_move,
	.set_config = drm_crtc_helper_set_config,
	.destroy = dc_destroy,
};

/* -------------------------------------------------------------------
 *
 *    Bus and infrastructure.
 *
 */
/*
 * Create the DRM planes for this head.  Plane indices start at 1
 * (index 0 is the primary window handled by the CRTC itself).
 * On failure only the plane that failed to initialize is freed;
 * previously registered planes stay owned by the DRM core.
 */
static int
dc_init_planes(struct dc_softc *sc, struct tegra_drm *drm)
{
	int i, rv;
	struct tegra_plane *plane;

	rv = 0;
	for (i = 0; i < DC_MAX_PLANES; i++) {
		plane = malloc(sizeof(*plane), DRM_MEM_KMS, M_WAITOK | M_ZERO);
		plane->index = i + 1;
		rv = drm_plane_init(&drm->drm_dev, &plane->drm_plane,
		    1 << sc->tegra_crtc.nvidia_head, &dc_plane_funcs,
		    dc_plane_formats, nitems(dc_plane_formats), false);
		if (rv != 0) {
			free(plane, DRM_MEM_KMS);
			return (rv);
		}
	}
	return 0;
}

/* Start or stop scan-out on this display controller head. */
static void
dc_display_enable(device_t dev, bool enable)
{
	struct dc_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	/* Set display mode */
	val = enable ?
	    CTRL_MODE_C_DISPLAY: CTRL_MODE_STOP;
	WR4(sc, DC_CMD_DISPLAY_COMMAND, DISPLAY_CTRL_MODE(val));

	/* and commit it*/
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE);
	WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ);
}

/* Route (or stop routing) this head's output to the HDMI encoder. */
static void
dc_hdmi_enable(device_t dev, bool enable)
{
	struct dc_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);
	val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS);
	if (enable)
		val |= HDMI_ENABLE;
	else
		val &= ~HDMI_ENABLE;
	WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val);
}

/*
 * Program fixed display timing parameters for this head; h_pulse_start
 * positions pulse 2 (an 8-pixel-wide pulse is programmed from it).
 */
static void
dc_setup_timing(device_t dev, int h_pulse_start)
{
	struct dc_softc *sc;

	sc = device_get_softc(dev);

	/* Setup display timing */
	WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, VSYNC_H_POSITION(1));
	WR4(sc, DC_DISP_DISP_COLOR_CONTROL,
	    DITHER_CONTROL(DITHER_DISABLE) | BASE_COLOR_SIZE(SIZE_BASE888));

	WR4(sc, DC_DISP_DISP_SIGNAL_OPTIONS0, H_PULSE2_ENABLE);
	WR4(sc, DC_DISP_H_PULSE2_CONTROL,
	    PULSE_CONTROL_QUAL(QUAL_VACTIVE) | PULSE_CONTROL_LAST(LAST_END_A));
	WR4(sc, DC_DISP_H_PULSE2_POSITION_A,
	    PULSE_START(h_pulse_start) | PULSE_END(h_pulse_start + 8));
}

/*
 * Display controller interrupt handler: acknowledge all pending status
 * bits, then dispatch vblank handling and any pending page flip.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	uint32_t status;

	sc = arg;

	/* Confirm interrupt */
	status = RD4(sc, DC_CMD_INT_STATUS);
	WR4(sc, DC_CMD_INT_STATUS, status);

	if (status & VBLANK_INT) {
		drm_handle_vblank(sc->tegra_crtc.drm_crtc.dev,
		    sc->tegra_crtc.nvidia_head);
		dc_finish_page_flip(sc);
	}
}

/*
 * TEGRA_DRM_INIT_CLIENT implementation: register this head's CRTC and
 * planes with the DRM core, configure interrupt sources, hook up the
 * interrupt handler and allocate the cursor staging buffer.
 */
static int
dc_init_client(device_t dev, device_t host1x, struct tegra_drm *drm)
{
	struct dc_softc *sc;
	int rv;

	sc = device_get_softc(dev);

	/* Export the stricter pitch alignment requirement. */
	if (drm->pitch_align < sc->pitch_align)
		drm->pitch_align = sc->pitch_align;

	drm_crtc_init(&drm->drm_dev, &sc->tegra_crtc.drm_crtc, &dc_crtc_funcs);
	drm_mode_crtc_set_gamma_size(&sc->tegra_crtc.drm_crtc, 256);
	drm_crtc_helper_add(&sc->tegra_crtc.drm_crtc, &dc_crtc_helper_funcs);

	rv = dc_init_planes(sc, drm);
	if (rv!= 0){
		device_printf(dev, "Cannot init planes\n");
		return (rv);
	}

	WR4(sc, DC_CMD_INT_TYPE,
	    WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
	    WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT);

	WR4(sc, DC_CMD_INT_POLARITY,
WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_ENABLE, 0); WR4(sc, DC_CMD_INT_MASK, 0); rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, dc_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "Cannot register interrupt handler\n"); return (rv); } /* allocate memory for cursor cache */ sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4, M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING); sc->tegra_crtc.cursor_pbase = vtophys((uintptr_t)sc->tegra_crtc.cursor_vbase); return (0); } static int dc_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct dc_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sc->irq_ih = NULL; return (0); } static int get_fdt_resources(struct dc_softc *sc, phandle_t node) { int rv; rv = hwreset_get_by_ofw_name(sc->dev, 0, "dc", &sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' reset\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "dc", &sc->clk_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' clock\n"); return (rv); } rv = OF_getencprop(node, "nvidia,head", &sc->tegra_crtc.nvidia_head, sizeof(sc->tegra_crtc.nvidia_head)); if (rv <= 0) { device_printf(sc->dev, "Cannot get 'nvidia,head' property\n"); return (rv); } return (0); } static int enable_fdt_resources(struct dc_softc *sc) { int id, rv; rv = clk_set_parent_by_clk(sc->clk_dc, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'dc' clock\n"); return (rv); } id = (sc->tegra_crtc.nvidia_head == 0) ? 
TEGRA_POWERGATE_DIS: TEGRA_POWERGATE_DISB; rv = tegra_powergate_sequence_power_up(id, sc->clk_dc, sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'DIS' powergate\n"); return (rv); } return (0); } static int dc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra Display Controller"); return (BUS_PROBE_DEFAULT); } static int dc_attach(device_t dev) { struct dc_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->tegra_crtc.dev = dev; node = ofw_bus_get_node(sc->dev); LOCK_INIT(sc); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } /* * Tegra124 * - 64 for RGB modes * - 128 for YUV planar modes * - 256 for block linear modes */ sc->pitch_align = 256; rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, 
SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (ENXIO); } static int dc_detach(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } static device_method_t tegra_dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, dc_init_client), DEVMETHOD(tegra_drm_exit_client, dc_exit_client), /* tegra dc interface */ DEVMETHOD(tegra_dc_display_enable, dc_display_enable), DEVMETHOD(tegra_dc_hdmi_enable, dc_hdmi_enable), DEVMETHOD(tegra_dc_setup_timing, dc_setup_timing), DEVMETHOD_END }; DEFINE_CLASS_0(tegra_dc, tegra_dc_driver, tegra_dc_methods, sizeof(struct dc_softc)); DRIVER_MODULE(tegra_dc, host1x, tegra_dc_driver, NULL, NULL); diff --git a/sys/arm/nvidia/drm2/tegra_drm_subr.c b/sys/arm/nvidia/drm2/tegra_drm_subr.c index 920419cc777e..060445966d68 100644 --- a/sys/arm/nvidia/drm2/tegra_drm_subr.c +++ b/sys/arm/nvidia/drm2/tegra_drm_subr.c @@ -1,174 +1,174 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include int tegra_drm_connector_get_modes(struct drm_connector *connector) { struct tegra_drm_encoder *output; struct edid *edid = NULL; int rv; output = container_of(connector, struct tegra_drm_encoder, connector); /* Panel is first */ if (output->panel != NULL) { /* XXX panel parsing */ return (0); } /* static EDID is second*/ edid = output->edid; /* EDID from monitor is last */ if (edid == NULL) edid = drm_get_edid(connector, output->ddc); if (edid == NULL) return (0); /* Process EDID */ drm_mode_connector_update_edid_property(connector, edid); rv = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); return (rv); } struct drm_encoder * tegra_drm_connector_best_encoder(struct drm_connector *connector) { struct tegra_drm_encoder *output; output = container_of(connector, struct tegra_drm_encoder, connector); return &(output->encoder); } enum 
drm_connector_status tegra_drm_connector_detect(struct drm_connector *connector, bool force) { struct tegra_drm_encoder *output; bool active; int rv; output = container_of(connector, struct tegra_drm_encoder, connector); if (output->gpio_hpd == NULL) { return ((output->panel != NULL) ? connector_status_connected: connector_status_disconnected); } rv = gpio_pin_is_active(output->gpio_hpd, &active); if (rv != 0) { device_printf(output->dev, " GPIO read failed: %d\n", rv); return (connector_status_unknown); } return (active ? connector_status_connected : connector_status_disconnected); } int tegra_drm_encoder_attach(struct tegra_drm_encoder *output, phandle_t node) { int rv; phandle_t ddc; /* XXX parse output panel here */ rv = OF_getencprop_alloc(node, "nvidia,edid", (void **)&output->edid); /* EDID exist but have invalid size */ if ((rv >= 0) && (rv != sizeof(struct edid))) { device_printf(output->dev, "Malformed \"nvidia,edid\" property\n"); if (output->edid != NULL) free(output->edid, M_OFWPROP); return (ENXIO); } gpio_pin_get_by_ofw_property(output->dev, node, "nvidia,hpd-gpio", &output->gpio_hpd); ddc = 0; OF_getencprop(node, "nvidia,ddc-i2c-bus", &ddc, sizeof(ddc)); if (ddc > 0) output->ddc = OF_device_from_xref(ddc); if ((output->edid == NULL) && (output->ddc == NULL)) return (ENXIO); if (output->gpio_hpd != NULL) { output->connector.polled = // DRM_CONNECTOR_POLL_HPD; DRM_CONNECTOR_POLL_DISCONNECT | DRM_CONNECTOR_POLL_CONNECT; } return (0); } int tegra_drm_encoder_init(struct tegra_drm_encoder *output, struct tegra_drm *drm) { if (output->panel) { /* attach panel */ } return (0); } int tegra_drm_encoder_exit(struct tegra_drm_encoder *output, struct tegra_drm *drm) { if (output->panel) { /* detach panel */ } return (0); } diff --git a/sys/arm/nvidia/drm2/tegra_fb.c b/sys/arm/nvidia/drm2/tegra_fb.c index 23994060700b..d6a74a32fffb 100644 --- a/sys/arm/nvidia/drm2/tegra_fb.c +++ b/sys/arm/nvidia/drm2/tegra_fb.c @@ -1,335 +1,335 @@ /*- * Copyright (c) 2016 Michal 
Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include -#include +#include #include #include #include #include static void fb_destroy(struct drm_framebuffer *drm_fb) { struct tegra_fb *fb; struct tegra_bo *bo; unsigned int i; fb = container_of(drm_fb, struct tegra_fb, drm_fb); for (i = 0; i < fb->nplanes; i++) { bo = fb->planes[i]; if (bo != NULL) drm_gem_object_unreference_unlocked(&bo->gem_obj); } drm_framebuffer_cleanup(drm_fb); free(fb->planes, DRM_MEM_DRIVER); } static int fb_create_handle(struct drm_framebuffer *drm_fb, struct drm_file *file, unsigned int *handle) { struct tegra_fb *fb; int rv; fb = container_of(drm_fb, struct tegra_fb, drm_fb); rv = drm_gem_handle_create(file, &fb->planes[0]->gem_obj, handle); return (rv); } /* XXX Probably not needed */ static int fb_dirty(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { return (0); } static const struct drm_framebuffer_funcs fb_funcs = { .destroy = fb_destroy, .create_handle = fb_create_handle, .dirty = fb_dirty, }; static int fb_alloc(struct drm_device *drm, struct drm_mode_fb_cmd2 *mode_cmd, struct tegra_bo **planes, int num_planes, struct tegra_fb **res_fb) { struct tegra_fb *fb; int i; int rv; fb = malloc(sizeof(*fb), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); fb->planes = malloc(num_planes * sizeof(*fb->planes), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); fb->nplanes = num_planes; drm_helper_mode_fill_fb_struct(&fb->drm_fb, mode_cmd); for (i = 0; i < fb->nplanes; i++) fb->planes[i] = planes[i]; rv = drm_framebuffer_init(drm, &fb->drm_fb, &fb_funcs); if (rv < 0) { device_printf(drm->dev, "Cannot initialize frame buffer %d\n", rv); free(fb->planes, DRM_MEM_DRIVER); return (rv); } *res_fb = fb; return (0); } static int tegra_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { u_int bpp, size; struct tegra_drm *drm; struct tegra_fb *fb; struct fb_info *info; struct tegra_bo *bo; struct 
drm_mode_fb_cmd2 mode_cmd; struct drm_device *drm_dev; int rv; if (helper->fb != NULL) return (0); DRM_DEBUG_KMS("surface: %d x %d (bpp: %d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); drm_dev = helper->dev; fb = container_of(helper, struct tegra_fb, fb_helper); drm = container_of(drm_dev, struct tegra_drm, drm_dev); bpp = (sizes->surface_bpp + 7) / 8; /* Create mode_cmd */ memset(&mode_cmd, 0, sizeof(mode_cmd)); mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = roundup(sizes->surface_width * bpp, drm->pitch_align); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; DRM_LOCK(drm_dev); rv = tegra_bo_create(drm_dev, size, &bo); DRM_UNLOCK(drm_dev); if (rv != 0) return (rv); info = framebuffer_alloc(); if (info == NULL) { device_printf(drm_dev->dev, "Cannot allocate DRM framebuffer info.\n"); rv = -ENOMEM; goto err_object; } rv = fb_alloc(drm_dev, &mode_cmd, &bo, 1, &fb); if (rv != 0) { device_printf(drm_dev->dev, "Cannot allocate DRM framebuffer.\n"); goto err_fb; } helper->fb = &fb->drm_fb; helper->fbdev = info; /* Fill FB info */ info->fb_vbase = bo->vbase; info->fb_pbase = bo->pbase; info->fb_size = size; info->fb_bpp = sizes->surface_bpp; drm_fb_helper_fill_fix(info, fb->drm_fb.pitches[0], fb->drm_fb.depth); drm_fb_helper_fill_var(info, helper, fb->drm_fb.width, fb->drm_fb.height); DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb size: %d, bo %p\n", fb->drm_fb.width, fb->drm_fb.height, fb->drm_fb.depth, size, bo); return (1); err_fb: drm_gem_object_unreference_unlocked(&bo->gem_obj); framebuffer_release(info); err_object: drm_gem_object_release(&bo->gem_obj); return (rv); } static struct drm_fb_helper_funcs fb_helper_funcs = { .fb_probe = tegra_fb_probe, }; /* * Exported functions */ struct fb_info * tegra_drm_fb_getinfo(struct drm_device *drm_dev) { struct tegra_fb *fb; struct tegra_drm *drm; drm = 
container_of(drm_dev, struct tegra_drm, drm_dev); fb = drm->fb; if (fb == NULL) return (NULL); return (fb->fb_helper.fbdev); } struct tegra_bo * tegra_fb_get_plane(struct tegra_fb *fb, int idx) { if (idx >= drm_format_num_planes(fb->drm_fb.pixel_format)) return (NULL); if (idx >= fb->nplanes) return (NULL); return (fb->planes[idx]); } int tegra_drm_fb_init(struct drm_device *drm_dev) { struct tegra_fb *fb; struct tegra_drm *drm; int rv; drm = drm_dev->dev_private; drm = container_of(drm_dev, struct tegra_drm, drm_dev); fb = malloc(sizeof(*fb), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); drm->fb = fb; fb->fb_helper.funcs = &fb_helper_funcs; rv = drm_fb_helper_init(drm_dev, &fb->fb_helper, drm_dev->mode_config.num_crtc, drm_dev->mode_config.num_connector); if (rv != 0) { device_printf(drm_dev->dev, "Cannot initialize frame buffer %d\n", rv); return (rv); } rv = drm_fb_helper_single_add_all_connectors(&fb->fb_helper); if (rv != 0) { device_printf(drm_dev->dev, "Cannot add all connectors: %d\n", rv); goto err_fini; } rv = drm_fb_helper_initial_config(&fb->fb_helper, 32); if (rv != 0) { device_printf(drm_dev->dev, "Cannot set initial config: %d\n", rv); goto err_fini; } /* XXXX Setup initial mode for FB */ /* drm_fb_helper_set_par(fb->fb_helper.fbdev); */ return 0; err_fini: drm_fb_helper_fini(&fb->fb_helper); return (rv); } int tegra_drm_fb_create(struct drm_device *drm, struct drm_file *file, struct drm_mode_fb_cmd2 *cmd, struct drm_framebuffer **fb_res) { int hsub, vsub, i; int width, height, size, bpp; struct tegra_bo *planes[4]; struct drm_gem_object *gem_obj; struct tegra_fb *fb; int rv, nplanes; hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format); vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format); nplanes = drm_format_num_planes(cmd->pixel_format); for (i = 0; i < nplanes; i++) { width = cmd->width; height = cmd->height; if (i != 0) { width /= hsub; height /= vsub; } gem_obj = drm_gem_object_lookup(drm, file, cmd->handles[i]); if (gem_obj == NULL) { 
rv = -ENXIO; goto fail; } bpp = drm_format_plane_cpp(cmd->pixel_format, i); size = (height - 1) * cmd->pitches[i] + width * bpp + cmd->offsets[i]; if (gem_obj->size < size) { rv = -EINVAL; goto fail; } planes[i] = container_of(gem_obj, struct tegra_bo, gem_obj); } rv = fb_alloc(drm, cmd, planes, nplanes, &fb); if (rv != 0) goto fail; *fb_res = &fb->drm_fb; return (0); fail: while (i--) drm_gem_object_unreference_unlocked(&planes[i]->gem_obj); return (rv); } void tegra_drm_fb_destroy(struct drm_device *drm_dev) { struct fb_info *info; struct tegra_fb *fb; struct tegra_drm *drm; drm = container_of(drm_dev, struct tegra_drm, drm_dev); fb = drm->fb; if (fb == NULL) return; info = fb->fb_helper.fbdev; drm_framebuffer_remove(&fb->drm_fb); framebuffer_release(info); drm_fb_helper_fini(&fb->fb_helper); drm_framebuffer_cleanup(&fb->drm_fb); free(fb, DRM_MEM_DRIVER); drm->fb = NULL; } diff --git a/sys/arm/nvidia/drm2/tegra_hdmi.c b/sys/arm/nvidia/drm2/tegra_hdmi.c index c45ca1721ff0..1c5e86bde498 100644 --- a/sys/arm/nvidia/drm2/tegra_hdmi.c +++ b/sys/arm/nvidia/drm2/tegra_hdmi.c @@ -1,1315 +1,1315 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra_dc_if.h" #include "tegra_drm_if.h" #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, 4 * (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, 4 * (_r)) /* HDA stream format verb. */ #define AC_FMT_CHAN_GET(x) (((x) >> 0) & 0xf) #define AC_FMT_CHAN_BITS_GET(x) (((x) >> 4) & 0x7) #define AC_FMT_DIV_GET(x) (((x) >> 8) & 0x7) #define AC_FMT_MUL_GET(x) (((x) >> 11) & 0x7) #define AC_FMT_BASE_44K (1 << 14) #define AC_FMT_TYPE_NON_PCM (1 << 15) #define HDMI_REKEY_DEFAULT 56 #define HDMI_ELD_BUFFER_SIZE 96 #define HDMI_DC_CLOCK_MULTIPIER 2 struct audio_reg { uint32_t audio_clk; bus_size_t acr_reg; bus_size_t nval_reg; bus_size_t aval_reg; }; static const struct audio_reg audio_regs[] = { { .audio_clk = 32000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0320, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320, }, { .audio_clk = 44100, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0441, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441, }, { .audio_clk = 88200, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0882, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882, }, { .audio_clk = 176400, 
.acr_reg = HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1764, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764, }, { .audio_clk = 48000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0480, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480, }, { .audio_clk = 96000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0960, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960, }, { .audio_clk = 192000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1920, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920, }, }; struct tmds_config { uint32_t pclk; uint32_t pll0; uint32_t pll1; uint32_t drive_c; uint32_t pe_c; uint32_t peak_c; uint32_t pad_ctls; }; static const struct tmds_config tegra124_tmds_config[] = { { /* 480p/576p / 25.2MHz/27MHz */ .pclk = 27000000, .pll0 = 0x01003010, .pll1 = 0x00301B00, .drive_c = 0x1F1F1F1F, .pe_c = 0x00000000, .peak_c = 0x03030303, .pad_ctls = 0x800034BB, }, { /* 720p/1080i / 74.25MHz */ .pclk = 74250000, .pll0 = 0x01003110, .pll1 = 0x00301500, .drive_c = 0x2C2C2C2C, .pe_c = 0x00000000, .peak_c = 0x07070707, .pad_ctls = 0x800034BB, }, { /* 1080p / 148.5MHz */ .pclk = 148500000, .pll0 = 0x01003310, .pll1 = 0x00301500, .drive_c = 0x33333333, .pe_c = 0x00000000, .peak_c = 0x0C0C0C0C, .pad_ctls = 0x800034BB, }, { /* 2216p / 297MHz */ .pclk = UINT_MAX, .pll0 = 0x01003F10, .pll1 = 0x00300F00, .drive_c = 0x37373737, .pe_c = 0x00000000, .peak_c = 0x17171717, .pad_ctls = 0x800036BB, }, }; struct hdmi_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; clk_t clk_parent; clk_t clk_hdmi; hwreset_t hwreset_hdmi; regulator_t supply_hdmi; regulator_t supply_pll; regulator_t supply_vdd; uint64_t pclk; boolean_t hdmi_mode; int audio_src_type; int audio_freq; int audio_chans; struct tegra_drm *drm; struct tegra_drm_encoder output; const struct tmds_config *tmds_config; int 
n_tmds_configs; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-hdmi", 1}, {NULL, 0}, }; /* These functions have been copied from newer version of drm_edid.c */ /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 #define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ static int drm_eld_size(const uint8_t *eld) { return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } static int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, struct drm_display_mode *mode) { int rv; if (!frame || !mode) return -EINVAL; rv = hdmi_avi_infoframe_init(frame); if (rv < 0) return rv; if (mode->flags & DRM_MODE_FLAG_DBLCLK) frame->pixel_repeat = 1; frame->video_code = drm_match_cea_mode(mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; #ifdef FREEBSD_NOTYET /* * Populate picture aspect ratio from either * user input (if specified) or from the CEA mode list. */ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) frame->picture_aspect = mode->picture_aspect_ratio; else if (frame->video_code > 0) frame->picture_aspect = drm_get_cea_aspect_ratio( frame->video_code); #endif frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; return 0; } /* --------------------------------------------------------------------- */ static int hdmi_setup_clock(struct tegra_drm_encoder *output, clk_t clk, uint64_t pclk) { struct hdmi_softc *sc; uint64_t freq; int rv; sc = device_get_softc(output->dev); /* Disable consumers clock for while. */ rv = clk_disable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot disable 'hdmi' clock\n"); return (rv); } rv = clk_disable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot disable display clock\n"); return (rv); } /* Set frequency for Display Controller PLL. 
*/ freq = HDMI_DC_CLOCK_MULTIPIER * pclk; rv = clk_set_freq(sc->clk_parent, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display pixel frequency\n"); return (rv); } /* Reparent display controller */ rv = clk_set_parent_by_clk(clk, sc->clk_parent); if (rv != 0) { device_printf(output->dev, "Cannot set parent clock\n"); return (rv); } rv = clk_set_freq(clk, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, pclk, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } /* And reenable consumers clock. */ rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable display clock\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = clk_get_freq(clk, &freq); if (rv != 0) { device_printf(output->dev, "Cannot get display controller frequency\n"); return (rv); } DRM_DEBUG_KMS("DC frequency: %llu\n", freq); return (0); } /* ------------------------------------------------------------------- * * Infoframes. 
* */ static void avi_setup_infoframe(struct hdmi_softc *sc, struct drm_display_mode *mode) { struct hdmi_avi_infoframe frame; uint8_t buf[17], *hdr, *pb; ssize_t rv; rv = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (rv < 0) { device_printf(sc->dev, "Cannot setup AVI infoframe: %zd\n", rv); return; } rv = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack AVI infoframe: %zd\n", rv); return; } hdr = buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, (pb[6] << 16) | (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, (pb[10] << 24) |(pb[9] << 16) | (pb[8] << 8) | (pb[7] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, (pb[13] << 16) | (pb[12] << 8) | (pb[11] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, AVI_INFOFRAME_CTRL_ENABLE); } static void audio_setup_infoframe(struct hdmi_softc *sc) { struct hdmi_audio_infoframe frame; uint8_t buf[14], *hdr, *pb; ssize_t rv; rv = hdmi_audio_infoframe_init(&frame); frame.channels = sc->audio_chans; rv = hdmi_audio_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack audio infoframe\n"); return; } hdr = buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH, (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, AUDIO_INFOFRAME_CTRL_ENABLE); } /* ------------------------------------------------------------------- * * Audio * */ static void init_hda_eld(struct hdmi_softc 
*sc) { size_t size; int i ; uint32_t val; size = drm_eld_size(sc->output.connector.eld); for (i = 0; i < HDMI_ELD_BUFFER_SIZE; i++) { val = i << 8; if (i < size) val |= sc->output.connector.eld[i]; WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR, val); } WR4(sc,HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE, SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT); } static int get_audio_regs(int freq, bus_size_t *acr_reg, bus_size_t *nval_reg, bus_size_t *aval_reg) { int i; const struct audio_reg *reg; for (i = 0; i < nitems(audio_regs) ; i++) { reg = audio_regs + i; if (reg->audio_clk == freq) { if (acr_reg != NULL) *acr_reg = reg->acr_reg; if (nval_reg != NULL) *nval_reg = reg->nval_reg; if (aval_reg != NULL) *aval_reg = reg->aval_reg; return (0); } } return (ERANGE); } #define FR_BITS 16 #define TO_FFP(x) (((int64_t)(x)) << FR_BITS) #define TO_INT(x) ((int)((x) >> FR_BITS)) static int get_hda_cts_n(uint32_t audio_freq_hz, uint32_t pixclk_freq_hz, uint32_t *best_cts, uint32_t *best_n, uint32_t *best_a) { int min_n; int max_n; int ideal_n; int n; int cts; int aval; int64_t err_f; int64_t min_err_f; int64_t cts_f; int64_t aval_f; int64_t half_f; /* constant 0.5 */ bool better_n; /* * All floats are in fixed I48.16 format. * * Ideal ACR interval is 1000 hz (1 ms); * acceptable is 300 hz .. 
1500 hz */ min_n = 128 * audio_freq_hz / 1500; max_n = 128 * audio_freq_hz / 300; ideal_n = 128 * audio_freq_hz / 1000; min_err_f = TO_FFP(100); half_f = TO_FFP(1) / 2; *best_n = 0; *best_cts = 0; *best_a = 0; for (n = min_n; n <= max_n; n++) { cts_f = TO_FFP(pixclk_freq_hz); cts_f *= n; cts_f /= 128 * audio_freq_hz; cts = TO_INT(cts_f + half_f); /* round */ err_f = cts_f - TO_FFP(cts); if (err_f < 0) err_f = -err_f; aval_f = TO_FFP(24000000); aval_f *= n; aval_f /= 128 * audio_freq_hz; aval = TO_INT(aval_f); /* truncate */ better_n = abs(n - ideal_n) < abs((int)(*best_n) - ideal_n); if (TO_FFP(aval) == aval_f && (err_f < min_err_f || (err_f == min_err_f && better_n))) { min_err_f = err_f; *best_n = (uint32_t)n; *best_cts = (uint32_t)cts; *best_a = (uint32_t)aval; if (err_f == 0 && n == ideal_n) break; } } return (0); } #undef FR_BITS #undef TO_FFP #undef TO_INT static int audio_setup(struct hdmi_softc *sc) { uint32_t val; uint32_t audio_n; uint32_t audio_cts; uint32_t audio_aval; uint64_t hdmi_freq; bus_size_t aval_reg; int rv; if (!sc->hdmi_mode) return (ENOTSUP); rv = get_audio_regs(sc->audio_freq, NULL, NULL, &aval_reg); if (rv != 0) { device_printf(sc->dev, "Unsupported audio frequency.\n"); return (rv); } rv = clk_get_freq(sc->clk_hdmi, &hdmi_freq); if (rv != 0) { device_printf(sc->dev, "Cannot get hdmi frequency: %d\n", rv); return (rv); } rv = get_hda_cts_n(sc->audio_freq, hdmi_freq, &audio_cts, &audio_n, &audio_aval); if (rv != 0) { device_printf(sc->dev, "Cannot compute audio coefs: %d\n", rv); return (rv); } /* Audio infoframe. 
 */
	audio_setup_infoframe(sc);

	/* Setup audio source */
	WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0,
	    SOR_AUDIO_CNTRL0_SOURCE_SELECT(sc->audio_src_type) |
	    SOR_AUDIO_CNTRL0_INJECT_NULLSMPL);

	/* Set the HBR enable bit in the SOR audio spare register. */
	val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
	val |= SOR_AUDIO_SPARE0_HBR_ENABLE;
	WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0, val);

	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_CTRL, 0);

	/*
	 * Program the ACR N value; RESETF is held set here and cleared
	 * below, after the CTS subpack registers are written.
	 */
	WR4(sc, HDMI_NV_PDISP_AUDIO_N,
	    AUDIO_N_RESETF |
	    AUDIO_N_GENERATE_ALTERNATE |
	    AUDIO_N_VALUE(audio_n - 1));

	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH,
	    ACR_SUBPACK_N(audio_n) | ACR_ENABLE);
	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW,
	    ACR_SUBPACK_CTS(audio_cts));

	WR4(sc, HDMI_NV_PDISP_HDMI_SPARE,
	    SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1));

	/* Release the N counter from reset. */
	val = RD4(sc, HDMI_NV_PDISP_AUDIO_N);
	val &= ~AUDIO_N_RESETF;
	WR4(sc, HDMI_NV_PDISP_AUDIO_N, val);

	/* aval_reg was selected by get_audio_regs() for sc->audio_freq. */
	WR4(sc, aval_reg, audio_aval);

	return (0);
}

/* Turn off the audio path and its infoframes. */
static void
audio_disable(struct hdmi_softc *sc)
{
	uint32_t val;

	/* Disable audio */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	val &= ~GENERIC_CTRL_AUDIO;
	WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val);

	/* Disable audio infoframes */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	val &= ~AUDIO_INFOFRAME_CTRL_ENABLE;
	WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val);
}

/* Turn on the audio path and its infoframes. */
static void
audio_enable(struct hdmi_softc *sc)
{
	uint32_t val;

	/*
	 * NOTE(review): when not in HDMI mode the audio path is disabled
	 * here, but execution still falls through to the enable writes
	 * below -- confirm that an early return is not intended.
	 */
	if (!sc->hdmi_mode)
		audio_disable(sc);

	/* Enable audio infoframes */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	val |= AUDIO_INFOFRAME_CTRL_ENABLE;
	WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val);

	/* Enable audio */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	val |= GENERIC_CTRL_AUDIO;
	WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val);
}

/* ------------------------------------------------------------------- *
 *
 * HDMI.
* */ /* Process format change notification from HDA */ static void hda_intr(struct hdmi_softc *sc) { uint32_t val; int rv; if (!sc->hdmi_mode) return; val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0); if ((val & (1 << 30)) == 0) { audio_disable(sc); return; } /* XXX Move this to any header */ /* Keep in sync with HDA */ sc->audio_freq = val & 0x00FFFFFF; sc->audio_chans = (val >> 24) & 0x0f; DRM_DEBUG_KMS("%d channel(s) at %dHz\n", sc->audio_chans, sc->audio_freq); rv = audio_setup(sc); if (rv != 0) { audio_disable(sc); return; } audio_enable(sc); } static void tmds_init(struct hdmi_softc *sc, const struct tmds_config *tmds) { WR4(sc, HDMI_NV_PDISP_SOR_PLL0, tmds->pll0); WR4(sc, HDMI_NV_PDISP_SOR_PLL1, tmds->pll1); WR4(sc, HDMI_NV_PDISP_PE_CURRENT, tmds->pe_c); WR4(sc, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, tmds->drive_c); WR4(sc, HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT, tmds->peak_c); WR4(sc, HDMI_NV_PDISP_SOR_PAD_CTLS0, tmds->pad_ctls); } static int hdmi_sor_start(struct hdmi_softc *sc, struct drm_display_mode *mode) { int i; uint32_t val; /* Enable TMDS macro */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; val &= ~SOR_PLL0_VCOPD; val &= ~SOR_PLL0_PULLDOWN; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); WR4(sc, HDMI_NV_PDISP_SOR_PWR, SOR_PWR_SETTING_NEW); WR4(sc, HDMI_NV_PDISP_SOR_PWR, 0); /* Wait until SOR is ready */ for (i = 1000; i > 0; i--) { val = RD4(sc, HDMI_NV_PDISP_SOR_PWR); if ((val & SOR_PWR_SETTING_NEW) == 0) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timeouted while enabling SOR power.\n"); return (ETIMEDOUT); } val = SOR_STATE2_ASY_OWNER(ASY_OWNER_HEAD0) | SOR_STATE2_ASY_SUBOWNER(SUBOWNER_BOTH) | SOR_STATE2_ASY_CRCMODE(ASY_CRCMODE_COMPLETE) | SOR_STATE2_ASY_PROTOCOL(ASY_PROTOCOL_SINGLE_TMDS_A); if (mode->flags & DRM_MODE_FLAG_NHSYNC) val |= SOR_STATE2_ASY_HSYNCPOL_NEG; if (mode->flags & DRM_MODE_FLAG_NVSYNC) val |= 
SOR_STATE2_ASY_VSYNCPOL_NEG; WR4(sc, HDMI_NV_PDISP_SOR_STATE2, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE1, SOR_STATE1_ASY_ORMODE_NORMAL | SOR_STATE1_ASY_HEAD_OPMODE(ASY_HEAD_OPMODE_AWAKE)); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, SOR_STATE0_UPDATE); val = RD4(sc, HDMI_NV_PDISP_SOR_STATE1); val |= SOR_STATE1_ATTACHED; WR4(sc, HDMI_NV_PDISP_SOR_STATE1, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); return 0; } static int hdmi_disable(struct hdmi_softc *sc) { struct tegra_crtc *crtc; device_t dc; uint32_t val; dc = NULL; if (sc->output.encoder.crtc != NULL) { crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; } if (dc != NULL) { TEGRA_DC_HDMI_ENABLE(dc, false); TEGRA_DC_DISPLAY_ENABLE(dc, false); } audio_disable(sc); val = RD4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); val &= ~AVI_INFOFRAME_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, val); /* Disable interrupts */ WR4(sc, HDMI_NV_PDISP_INT_ENABLE, 0); WR4(sc, HDMI_NV_PDISP_INT_MASK, 0); return (0); } static int hdmi_enable(struct hdmi_softc *sc) { uint64_t freq; struct drm_display_mode *mode; struct tegra_crtc *crtc; uint32_t val, h_sync_width, h_back_porch, h_front_porch, h_pulse_start; uint32_t h_max_ac_packet, div8_2; device_t dc; int i, rv; mode = &sc->output.encoder.crtc->mode; crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; /* Compute all timings first. */ sc->pclk = mode->clock * 1000; h_sync_width = mode->hsync_end - mode->hsync_start; h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; h_pulse_start = 1 + h_sync_width + h_back_porch - 10; h_max_ac_packet = (h_sync_width + h_back_porch + h_front_porch - HDMI_REKEY_DEFAULT - 18) / 32; /* Check if HDMI device is connected and detected. 
*/ if (sc->output.connector.edid_blob_ptr == NULL) { sc->hdmi_mode = false; } else { sc->hdmi_mode = drm_detect_hdmi_monitor( (struct edid *)sc->output.connector.edid_blob_ptr->data); } /* Get exact HDMI pixel frequency. */ rv = clk_get_freq(sc->clk_hdmi, &freq); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock frequency\n"); return (rv); } DRM_DEBUG_KMS("HDMI frequency: %llu Hz\n", freq); /* Wakeup SOR power */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); /* Setup timings */ TEGRA_DC_SETUP_TIMING(dc, h_pulse_start); WR4(sc, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW, VSYNC_WINDOW_START(0x200) | VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_ENABLE); /* Setup video source and adjust video range */ val = 0; if (crtc->nvidia_head != 0) HDMI_SRC_DISPLAYB; if ((mode->hdisplay != 640) || (mode->vdisplay != 480)) val |= ARM_VIDEO_RANGE_LIMITED; WR4(sc, HDMI_NV_PDISP_INPUT_CONTROL, val); /* Program SOR reference clock - it uses 8.2 fractional divisor */ div8_2 = (freq * 4) / 1000000; val = SOR_REFCLK_DIV_INT(div8_2 >> 2) | SOR_REFCLK_DIV_FRAC(div8_2); WR4(sc, HDMI_NV_PDISP_SOR_REFCLK, val); /* Setup audio */ if (sc->hdmi_mode) { rv = audio_setup(sc); if (rv != 0) sc->hdmi_mode = false; } /* Init HDA ELD */ init_hda_eld(sc); val = HDMI_CTRL_REKEY(HDMI_REKEY_DEFAULT); val |= HDMI_CTRL_MAX_AC_PACKET(h_max_ac_packet); if (sc->hdmi_mode) val |= HDMI_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_CTRL, val); /* Setup TMDS */ for (i = 0; i < sc->n_tmds_configs; i++) { if (sc->pclk <= sc->tmds_config[i].pclk) { tmds_init(sc, sc->tmds_config + i); break; } } /* Program sequencer. 
*/ WR4(sc, HDMI_NV_PDISP_SOR_SEQ_CTL, SOR_SEQ_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8)); val = SOR_SEQ_INST_WAIT_TIME(1) | SOR_SEQ_INST_WAIT_UNITS(WAIT_UNITS_VSYNC) | SOR_SEQ_INST_HALT | SOR_SEQ_INST_DRIVE_PWM_OUT_LO; WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(0), val); WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(8), val); val = RD4(sc,HDMI_NV_PDISP_SOR_CSTM); val &= ~SOR_CSTM_LVDS_ENABLE; val &= ~SOR_CSTM_ROTCLK(~0); val |= SOR_CSTM_ROTCLK(2); val &= ~SOR_CSTM_MODE(~0); val |= SOR_CSTM_MODE(CSTM_MODE_TMDS); val |= SOR_CSTM_PLLDIV; WR4(sc, HDMI_NV_PDISP_SOR_CSTM, val); TEGRA_DC_DISPLAY_ENABLE(dc, false); rv = hdmi_sor_start(sc, mode); if (rv != 0) return (rv); TEGRA_DC_HDMI_ENABLE(dc, true); TEGRA_DC_DISPLAY_ENABLE(dc, true); /* Enable HDA codec interrupt */ WR4(sc, HDMI_NV_PDISP_INT_MASK, INT_CODEC_SCRATCH0); WR4(sc, HDMI_NV_PDISP_INT_ENABLE, INT_CODEC_SCRATCH0); if (sc->hdmi_mode) { avi_setup_infoframe(sc, mode); audio_enable(sc); } return (0); } /* ------------------------------------------------------------------- * * DRM Interface. 
 *
 */

/*
 * Validate a proposed display mode: usable only if the parent clock can
 * be programmed to the corresponding (multiplied) pixel frequency.
 */
static enum drm_mode_status
hdmi_connector_mode_valid(struct drm_connector *connector,
    struct drm_display_mode *mode)
{
	struct tegra_drm_encoder *output;
	struct hdmi_softc *sc;
	int rv;
	uint64_t freq;

	output = container_of(connector, struct tegra_drm_encoder,
	    connector);
	sc = device_get_softc(output->dev);

	/* mode->clock is in kHz; test the scaled frequency on clk_parent. */
	freq = HDMI_DC_CLOCK_MULTIPIER * mode->clock * 1000;
	rv = clk_test_freq(sc->clk_parent, freq, 0);
	DRM_DEBUG_KMS("Test HDMI frequency: %u kHz, rv: %d\n", mode->clock,
	    rv);
	if (rv != 0)
		return (MODE_NOCLOCK);

	return (MODE_OK);
}

/* DRM connector helper callbacks. */
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
	.get_modes = tegra_drm_connector_get_modes,
	.mode_valid = hdmi_connector_mode_valid,
	.best_encoder = tegra_drm_connector_best_encoder,
};

/* DRM connector callbacks. */
static const struct drm_connector_funcs hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = tegra_drm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};

/* DRM encoder callbacks. */
static const struct drm_encoder_funcs hdmi_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/* No per-encoder DPMS handling; power is managed in enable/disable. */
static void
hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	/* Empty function. */
}

/* No mode fixup needed; accept the mode as-is. */
static bool
hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
    const struct drm_display_mode *mode,
    struct drm_display_mode *adjusted)
{
	return (true);
}

static void
hdmi_encoder_prepare(struct drm_encoder *encoder)
{
	/* Empty function. */
}

static void
hdmi_encoder_commit(struct drm_encoder *encoder)
{
	/* Empty function.
*/ } static void hdmi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); rv = hdmi_enable(sc); if (rv != 0) device_printf(sc->dev, "Cannot enable HDMI port\n"); } static void hdmi_encoder_disable(struct drm_encoder *encoder) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); if (sc == NULL) return; rv = hdmi_disable(sc); if (rv != 0) device_printf(sc->dev, "Cannot disable HDMI port\n"); } static const struct drm_encoder_helper_funcs hdmi_encoder_helper_funcs = { .dpms = hdmi_encoder_dpms, .mode_fixup = hdmi_encoder_mode_fixup, .prepare = hdmi_encoder_prepare, .commit = hdmi_encoder_commit, .mode_set = hdmi_encoder_mode_set, .disable = hdmi_encoder_disable, }; /* ------------------------------------------------------------------- * * Bus and infrastructure. * */ static int hdmi_init_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; phandle_t node; int rv; sc = device_get_softc(dev); node = ofw_bus_get_node(sc->dev); sc->drm = drm; sc->output.setup_clock = &hdmi_setup_clock; rv = tegra_drm_encoder_attach(&sc->output, node); if (rv != 0) { device_printf(dev, "Cannot attach output connector\n"); return(ENXIO); } /* Connect this encoder + connector to DRM. 
*/ drm_connector_init(&drm->drm_dev, &sc->output.connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(&sc->output.connector, &hdmi_connector_helper_funcs); sc->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(&drm->drm_dev, &sc->output.encoder, &hdmi_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&sc->output.encoder, &hdmi_encoder_helper_funcs); drm_mode_connector_attach_encoder(&sc->output.connector, &sc->output.encoder); rv = tegra_drm_encoder_init(&sc->output, drm); if (rv < 0) { device_printf(sc->dev, "Unable to init HDMI output\n"); return (rv); } sc->output.encoder.possible_crtcs = 0x3; return (0); } static int hdmi_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; sc = device_get_softc(dev); tegra_drm_encoder_exit(&sc->output, drm); return (0); } static int get_fdt_resources(struct hdmi_softc *sc, phandle_t node) { int rv; rv = regulator_get_by_ofw_property(sc->dev, 0, "hdmi-supply", &sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev,0, "pll-supply", &sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vdd-supply", &sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot get 'vdd' regulator\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' reset\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock\n"); return (ENXIO); } return (0); } static int enable_fdt_resources(struct hdmi_softc *sc) { int rv; rv = 
clk_set_parent_by_clk(sc->clk_hdmi, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'hdmi' clock\n"); return (rv); } /* 594 MHz is arbitrarily selected value */ rv = clk_set_freq(sc->clk_parent, 594000000, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, 594000000 / 4, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = regulator_enable(sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pll' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vdd' regulator\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'hdmi' reset\n"); return (rv); } return (0); } static void hdmi_intr(void *arg) { struct hdmi_softc *sc; uint32_t status; sc = arg; /* Confirm interrupt */ status = RD4(sc, HDMI_NV_PDISP_INT_STATUS); WR4(sc, HDMI_NV_PDISP_INT_STATUS, status); /* process audio verb from HDA */ if (status & INT_CODEC_SCRATCH0) hda_intr(sc); } static int hdmi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra HDMI"); return (BUS_PROBE_DEFAULT); } static int hdmi_attach(device_t dev) { struct hdmi_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->output.dev = sc->dev; node = ofw_bus_get_node(sc->dev); sc->audio_src_type = SOURCE_SELECT_AUTO; sc->audio_freq = 44100; sc->audio_chans = 2; sc->hdmi_mode = false; sc->tmds_config = tegra124_tmds_config; 
sc->n_tmds_configs = nitems(tegra124_tmds_config); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, hdmi_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int hdmi_detach(device_t dev) { struct hdmi_softc *sc; sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) 
clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (bus_generic_detach(dev)); } static device_method_t tegra_hdmi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hdmi_probe), DEVMETHOD(device_attach, hdmi_attach), DEVMETHOD(device_detach, hdmi_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, hdmi_init_client), DEVMETHOD(tegra_drm_exit_client, hdmi_exit_client), DEVMETHOD_END }; DEFINE_CLASS_0(tegra_hdmi, tegra_hdmi_driver, tegra_hdmi_methods, sizeof(struct hdmi_softc)); DRIVER_MODULE(tegra_hdmi, host1x, tegra_hdmi_driver, 0, 0); diff --git a/sys/arm/nvidia/drm2/tegra_host1x.c b/sys/arm/nvidia/drm2/tegra_host1x.c index 080fbef659a1..284c5c2e8465 100644 --- a/sys/arm/nvidia/drm2/tegra_host1x.c +++ b/sys/arm/nvidia/drm2/tegra_host1x.c @@ -1,639 +1,639 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include "fb_if.h" #include "tegra_drm_if.h" #define WR4(_sc, _r, _v) bus_rite_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define SLEEP(_sc, timeout) sx_sleep(sc, &sc->lock, 0, "host1x", timeout); #define LOCK_INIT(_sc) sx_init(&_sc->lock, "host1x") #define LOCK_DESTROY(_sc) sx_destroy(&_sc->lock) #define ASSERT_LOCKED(_sc) sx_assert(&_sc->lock, SA_LOCKED) #define ASSERT_UNLOCKED(_sc) sx_assert(&_sc->lock, SA_UNLOCKED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-host1x", 1}, {NULL, 0} }; #define DRIVER_NAME "tegra" #define DRIVER_DESC "NVIDIA Tegra TK1" #define DRIVER_DATE "20151101" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 struct client_info; TAILQ_HEAD(client_list, client_info); typedef struct client_list client_list_t; struct client_info { TAILQ_ENTRY(client_info) list_e; device_t client; int activated; }; struct host1x_softc { 
struct simplebus_softc simplebus_sc; /* must be first */ device_t dev; struct sx lock; int attach_done; struct resource *mem_res; struct resource *syncpt_irq_res; void *syncpt_irq_h; struct resource *gen_irq_res; void *gen_irq_h; clk_t clk; hwreset_t reset; struct intr_config_hook irq_hook; int drm_inited; client_list_t clients; struct tegra_drm *tegra_drm; }; static void host1x_output_poll_changed(struct drm_device *drm_dev) { struct tegra_drm *drm; drm = container_of(drm_dev, struct tegra_drm, drm_dev); if (drm->fb != NULL) drm_fb_helper_hotplug_event(&drm->fb->fb_helper); } static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = tegra_drm_fb_create, .output_poll_changed = host1x_output_poll_changed, }; static int host1x_drm_init(struct host1x_softc *sc) { struct client_info *entry; int rv; LOCK(sc); TAILQ_FOREACH(entry, &sc->clients, list_e) { if (entry->activated) continue; rv = TEGRA_DRM_INIT_CLIENT(entry->client, sc->dev, sc->tegra_drm); if (rv != 0) { device_printf(sc->dev, "Cannot init DRM client %s: %d\n", device_get_name(entry->client), rv); return (rv); } entry->activated = 1; } UNLOCK(sc); return (0); } static int host1x_drm_exit(struct host1x_softc *sc) { struct client_info *entry; int rv; #ifdef FREEBSD_NOTYET struct drm_device *dev, *tmp; #endif LOCK(sc); if (!sc->drm_inited) { UNLOCK(sc); return (0); } TAILQ_FOREACH_REVERSE(entry, &sc->clients, client_list, list_e) { if (!entry->activated) continue; rv = TEGRA_DRM_EXIT_CLIENT(entry->client, sc->dev, sc->tegra_drm); if (rv != 0) { device_printf(sc->dev, "Cannot exit DRM client %s: %d\n", device_get_name(entry->client), rv); } entry->activated = 0; } #ifdef FREEBSD_NOTYET list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) drm_put_dev(dev); #endif sc->drm_inited = 0; UNLOCK(sc); return (0); } static int host1x_drm_load(struct drm_device *drm_dev, unsigned long flags) { struct host1x_softc *sc; int rv; sc = device_get_softc(drm_dev->dev); 
drm_mode_config_init(drm_dev); drm_dev->mode_config.min_width = 32; drm_dev->mode_config.min_height = 32; drm_dev->mode_config.max_width = 4096; drm_dev->mode_config.max_height = 4096; drm_dev->mode_config.funcs = &mode_config_funcs; rv = host1x_drm_init(sc); if (rv != 0) goto fail_host1x; drm_dev->irq_enabled = true; drm_dev->max_vblank_count = 0xffffffff; drm_dev->vblank_disable_allowed = true; rv = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc); if (rv != 0) goto fail_vblank; drm_mode_config_reset(drm_dev); rv = tegra_drm_fb_init(drm_dev); if (rv != 0) goto fail_fb; drm_kms_helper_poll_init(drm_dev); return (0); fail_fb: tegra_drm_fb_destroy(drm_dev); drm_vblank_cleanup(drm_dev); fail_vblank: host1x_drm_exit(sc); fail_host1x: drm_mode_config_cleanup(drm_dev); return (rv); } static int host1x_drm_unload(struct drm_device *drm_dev) { struct host1x_softc *sc; int rv; sc = device_get_softc(drm_dev->dev); drm_kms_helper_poll_fini(drm_dev); tegra_drm_fb_destroy(drm_dev); drm_mode_config_cleanup(drm_dev); rv = host1x_drm_exit(sc); if (rv < 0) return (rv); return (0); } static int host1x_drm_open(struct drm_device *drm_dev, struct drm_file *filp) { return (0); } static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) tegra_dc_cancel_page_flip(crtc, file); } static void host1x_drm_lastclose(struct drm_device *drm_dev) { struct tegra_drm *drm; drm = container_of(drm_dev, struct tegra_drm, drm_dev); if (drm->fb != NULL) drm_fb_helper_restore_fbdev_mode(&drm->fb->fb_helper); } static int host1x_drm_enable_vblank(struct drm_device *drm_dev, int pipe) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { if (pipe == tegra_dc_get_pipe(crtc)) { tegra_dc_enable_vblank(crtc); return (0); } } return (-ENODEV); } static void host1x_drm_disable_vblank(struct drm_device *drm_dev, int pipe) { struct drm_crtc *crtc; 
list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { if (pipe == tegra_dc_get_pipe(crtc)) { tegra_dc_disable_vblank(crtc); return; } } } static struct drm_ioctl_desc host1x_drm_ioctls[] = { }; struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = host1x_drm_load, .unload = host1x_drm_unload, .open = host1x_drm_open, .preclose = tegra_drm_preclose, .lastclose = host1x_drm_lastclose, .get_vblank_counter = drm_vblank_count, .enable_vblank = host1x_drm_enable_vblank, .disable_vblank = host1x_drm_disable_vblank, /* Fields filled by tegra_bo_driver_register() .gem_free_object .gem_pager_ops .dumb_create .dumb_map_offset .dumb_destroy */ .ioctls = host1x_drm_ioctls, .num_ioctls = nitems(host1x_drm_ioctls), .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; /* * ----------------- Device methods ------------------------- */ static void host1x_irq_hook(void *arg) { struct host1x_softc *sc; int rv; sc = arg; config_intrhook_disestablish(&sc->irq_hook); tegra_bo_driver_register(&tegra_drm_driver); rv = drm_get_platform_dev(sc->dev, &sc->tegra_drm->drm_dev, &tegra_drm_driver); if (rv != 0) { device_printf(sc->dev, "drm_get_platform_dev(): %d\n", rv); return; } sc->drm_inited = 1; } static struct fb_info * host1x_fb_helper_getinfo(device_t dev) { struct host1x_softc *sc; sc = device_get_softc(dev); if (sc->tegra_drm == NULL) return (NULL); return (tegra_drm_fb_getinfo(&sc->tegra_drm->drm_dev)); } static int host1x_register_client(device_t dev, device_t client) { struct host1x_softc *sc; struct client_info *entry; sc = device_get_softc(dev); entry = malloc(sizeof(struct client_info), M_DEVBUF, M_WAITOK | M_ZERO); entry->client = client; entry->activated = 0; LOCK(sc); TAILQ_INSERT_TAIL(&sc->clients, entry, list_e); UNLOCK(sc); return (0); } static int host1x_deregister_client(device_t dev, device_t client) { struct 
host1x_softc *sc; struct client_info *entry; sc = device_get_softc(dev); LOCK(sc); TAILQ_FOREACH(entry, &sc->clients, list_e) { if (entry->client == client) { if (entry->activated) panic("Tegra DRM: Attempt to deregister " "activated client"); TAILQ_REMOVE(&sc->clients, entry, list_e); free(entry, M_DEVBUF); UNLOCK(sc); return (0); } } UNLOCK(sc); return (0); } static void host1x_gen_intr(void *arg) { struct host1x_softc *sc; sc = (struct host1x_softc *)arg; LOCK(sc); UNLOCK(sc); } static void host1x_syncpt_intr(void *arg) { struct host1x_softc *sc; sc = (struct host1x_softc *)arg; LOCK(sc); UNLOCK(sc); } static void host1x_new_pass(device_t dev) { struct host1x_softc *sc; int rv, rid; phandle_t node; /* * We attach during BUS_PASS_BUS (because we must overcome simplebus), * but some of our FDT resources are not ready until BUS_PASS_DEFAULT */ sc = device_get_softc(dev); if (sc->attach_done || bus_current_pass < BUS_PASS_DEFAULT) { bus_generic_new_pass(dev); return; } sc->attach_done = 1; node = ofw_bus_get_node(dev); /* Allocate our IRQ resource. 
*/ rid = 0; sc->syncpt_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->syncpt_irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } rid = 1; sc->gen_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->gen_irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* FDT resources */ rv = hwreset_get_by_ofw_name(sc->dev, 0, "host1x", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get fuse reset\n"); goto fail; } rv = clk_get_by_ofw_index(sc->dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); goto fail; } /* Setup interrupts */ rv = bus_setup_intr(dev, sc->gen_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, host1x_gen_intr, sc, &sc->gen_irq_h); if (rv) { device_printf(dev, "Cannot setup gen interrupt.\n"); goto fail; } rv = bus_setup_intr(dev, sc->syncpt_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, host1x_syncpt_intr, sc, &sc->syncpt_irq_h); if (rv) { device_printf(dev, "Cannot setup syncpt interrupt.\n"); goto fail; } simplebus_init(dev, 0); for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); sc->irq_hook.ich_func = host1x_irq_hook; sc->irq_hook.ich_arg = sc; config_intrhook_establish(&sc->irq_hook); bus_generic_new_pass(dev); return; fail: device_detach(dev); return; } static int host1x_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int host1x_attach(device_t dev) { int rv, rid; struct host1x_softc *sc; sc = device_get_softc(dev); sc->tegra_drm = malloc(sizeof(struct tegra_drm), 
DRM_MEM_DRIVER, M_WAITOK | M_ZERO); /* crosslink together all worlds */ sc->dev = dev; sc->tegra_drm->drm_dev.dev_private = &sc->tegra_drm; sc->tegra_drm->drm_dev.dev = dev; TAILQ_INIT(&sc->clients); LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } return (bus_generic_attach(dev)); fail: if (sc->tegra_drm != NULL) free(sc->tegra_drm, DRM_MEM_DRIVER); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int host1x_detach(device_t dev) { struct host1x_softc *sc; sc = device_get_softc(dev); host1x_drm_exit(sc); if (sc->gen_irq_h != NULL) bus_teardown_intr(dev, sc->gen_irq_res, sc->gen_irq_h); if (sc->tegra_drm != NULL) free(sc->tegra_drm, DRM_MEM_DRIVER); if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->syncpt_irq_h != NULL) bus_teardown_intr(dev, sc->syncpt_irq_res, sc->syncpt_irq_h); if (sc->gen_irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 1, sc->gen_irq_res); if (sc->syncpt_irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->syncpt_irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } static device_method_t host1x_methods[] = { /* Device interface */ DEVMETHOD(device_probe, host1x_probe), DEVMETHOD(device_attach, host1x_attach), DEVMETHOD(device_detach, host1x_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, host1x_new_pass), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, host1x_fb_helper_getinfo), /* tegra drm interface */ DEVMETHOD(tegra_drm_register_client, host1x_register_client), DEVMETHOD(tegra_drm_deregister_client, host1x_deregister_client), DEVMETHOD_END }; DEFINE_CLASS_1(host1x, host1x_driver, 
host1x_methods, sizeof(struct host1x_softc), simplebus_driver); EARLY_DRIVER_MODULE(host1x, simplebus, host1x_driver, 0, 0, BUS_PASS_BUS); /* Bindings for fbd device. */ extern driver_t fbd_driver; DRIVER_MODULE(fbd, host1x, fbd_driver, 0, 0); diff --git a/sys/arm/nvidia/tegra124/tegra124_car.c b/sys/arm/nvidia/tegra124/tegra124_car.c index ebb30b29aaa9..440f5a5b1044 100644 --- a/sys/arm/nvidia/tegra124/tegra124_car.c +++ b/sys/arm/nvidia/tegra124/tegra124_car.c @@ -1,599 +1,599 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include #include "clkdev_if.h" #include "hwreset_if.h" #include "tegra124_car.h" static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-car", 1}, {NULL, 0}, }; #define PLIST(x) static const char *x[] /* Pure multiplexer. */ #define MUX(_id, cname, plists, o, s, w) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = plists, \ .clkdef.parent_cnt = nitems(plists), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .width = w, \ } /* Fractional divider (7.1). */ #define DIV7_1(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = (s) + 1, \ .i_width = 7, \ .f_shift = s, \ .f_width = 1, \ } /* Integer divider. */ #define DIV(_id, cname, plist, o, s, w, f) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = s, \ .i_width = w, \ .div_flags = f, \ } /* Gate in PLL block. */ #define GATE_PLL(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 3, \ .on_value = 3, \ .off_value = 0, \ } /* Standard gate. */ #define GATE(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 1, \ .on_value = 1, \ .off_value = 0, \ } /* Inverted gate. 
*/ #define GATE_INV(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 1, \ .on_value = 0, \ .off_value = 1, \ } /* Fixed rate clock. */ #define FRATE(_id, cname, _freq) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .freq = _freq, \ } /* Fixed rate multipier/divider. */ #define FACT(_id, cname, pname, _mult, _div) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .mult = _mult, \ .div = _div, \ } static uint32_t osc_freqs[16] = { [0] = 13000000, [1] = 16800000, [4] = 19200000, [5] = 38400000, [8] = 12000000, [9] = 48000000, [12] = 260000000, }; /* Parent lists. */ PLIST(mux_pll_srcs) = {"osc_div_clk", NULL, "pllP_out0", NULL}; /* FIXME */ PLIST(mux_plle_src1) = {"osc_div_clk", "pllP_out0"}; PLIST(mux_plle_src) = {"pllE_src1", "pllREFE_out"}; PLIST(mux_plld_out0_plld2_out0) = {"pllD_out0", "pllD2_out0"}; PLIST(mux_xusb_hs) = {"xusb_ss_div2", "pllU_60"}; PLIST(mux_xusb_ss) = {"pc_xusb_ss", "osc_div_clk"}; /* Clocks adjusted online. */ static struct clk_fixed_def fixed_clk_m = FRATE(TEGRA124_CLK_CLK_M, "clk_m", 12000000); static struct clk_fixed_def fixed_osc_div_clk = FACT(0, "osc_div_clk", "clk_m", 1, 1); static struct clk_fixed_def tegra124_fixed_clks[] = { /* Core clocks. 
*/ FRATE(0, "clk_s", 32768), FACT(0, "clk_m_div2", "clk_m", 1, 2), FACT(0, "clk_m_div4", "clk_m", 1, 3), FACT(0, "pllU_60", "pllU_out", 1, 8), FACT(0, "pllU_48", "pllU_out", 1, 10), FACT(0, "pllU_12", "pllU_out", 1, 40), FACT(TEGRA124_CLK_PLL_D_OUT0, "pllD_out0", "pllD_out", 1, 2), FACT(TEGRA124_CLK_PLL_D2_OUT0, "pllD2_out0", "pllD2_out", 1, 1), FACT(0, "pllX_out0", "pllX_out", 1, 2), FACT(0, "pllC_UD", "pllC_out0", 1, 1), FACT(0, "pllM_UD", "pllM_out0", 1, 1), /* Audio clocks. */ FRATE(0, "audio0", 10000000), FRATE(0, "audio1", 10000000), FRATE(0, "audio2", 10000000), FRATE(0, "audio3", 10000000), FRATE(0, "audio4", 10000000), FRATE(0, "ext_vimclk", 10000000), /* XUSB */ FACT(TEGRA124_CLK_XUSB_SS_DIV2, "xusb_ss_div2", "xusb_ss", 1, 2), }; static struct clk_mux_def tegra124_mux_clks[] = { /* Core clocks. */ MUX(0, "pllD2_src", mux_pll_srcs, PLLD2_BASE, 25, 2), MUX(0, "pllDP_src", mux_pll_srcs, PLLDP_BASE, 25, 2), MUX(0, "pllC4_src", mux_pll_srcs, PLLC4_BASE, 25, 2), MUX(0, "pllE_src1", mux_plle_src1, PLLE_AUX, 2, 1), MUX(0, "pllE_src", mux_plle_src, PLLE_AUX, 28, 1), /* Base peripheral clocks. */ MUX(0, "dsia_mux", mux_plld_out0_plld2_out0, PLLD_BASE, 25, 1), MUX(0, "dsib_mux", mux_plld_out0_plld2_out0, PLLD2_BASE, 25, 1), /* USB. */ MUX(TEGRA124_CLK_XUSB_HS_SRC, "xusb_hs", mux_xusb_hs, CLK_SOURCE_XUSB_SS, 25, 1), MUX(0, "xusb_ss_mux", mux_xusb_ss, CLK_SOURCE_XUSB_SS, 24, 1), }; static struct clk_gate_def tegra124_gate_clks[] = { /* Core clocks. 
*/ GATE_PLL(0, "pllC_out1", "pllC_out1_div", PLLC_OUT, 0), GATE_PLL(0, "pllM_out1", "pllM_out1_div", PLLM_OUT, 0), GATE_PLL(TEGRA124_CLK_PLL_U_480M, "pllU_480", "pllU_out", PLLU_BASE, 22), GATE_PLL(0, "pllP_outX0", "pllP_outX0_div", PLLP_RESHIFT, 0), GATE_PLL(0, "pllP_out1", "pllP_out1_div", PLLP_OUTA, 0), GATE_PLL(0, "pllP_out2", "pllP_out2_div", PLLP_OUTA, 16), GATE_PLL(0, "pllP_out3", "pllP_out3_div", PLLP_OUTB, 0), GATE_PLL(0, "pllP_out4", "pllP_out4_div", PLLP_OUTB, 16), GATE_PLL(0, "pllP_out5", "pllP_out5_div", PLLP_OUTC, 16), GATE_PLL(0, "pllA_out0", "pllA_out1_div", PLLA_OUT, 0), /* Base peripheral clocks. */ GATE(TEGRA124_CLK_CML0, "cml0", "pllE_out0", PLLE_AUX, 0), GATE(TEGRA124_CLK_CML1, "cml1", "pllE_out0", PLLE_AUX, 1), GATE_INV(TEGRA124_CLK_HCLK, "hclk", "hclk_div", CLK_SYSTEM_RATE, 7), GATE_INV(TEGRA124_CLK_PCLK, "pclk", "pclk_div", CLK_SYSTEM_RATE, 3), }; static struct clk_div_def tegra124_div_clks[] = { /* Core clocks. */ DIV7_1(0, "pllC_out1_div", "pllC_out0", PLLC_OUT, 2), DIV7_1(0, "pllM_out1_div", "pllM_out0", PLLM_OUT, 8), DIV7_1(0, "pllP_outX0_div", "pllP_out0", PLLP_RESHIFT, 2), DIV7_1(0, "pllP_out1_div", "pllP_out0", PLLP_OUTA, 8), DIV7_1(0, "pllP_out2_div", "pllP_out0", PLLP_OUTA, 24), DIV7_1(0, "pllP_out3_div", "pllP_out0", PLLP_OUTB, 8), DIV7_1(0, "pllP_out4_div", "pllP_out0", PLLP_OUTB, 24), DIV7_1(0, "pllP_out5_div", "pllP_out0", PLLP_OUTC, 24), DIV7_1(0, "pllA_out1_div", "pllA_out", PLLA_OUT, 8), /* Base peripheral clocks. */ DIV(0, "hclk_div", "sclk", CLK_SYSTEM_RATE, 4, 2, 0), DIV(0, "pclk_div", "hclk", CLK_SYSTEM_RATE, 0, 2, 0), }; /* Initial setup table. 
*/ static struct tegra124_init_item clk_init_table[] = { /* clock, partent, frequency, enable */ {"uarta", "pllP_out0", 408000000, 0}, {"uartb", "pllP_out0", 408000000, 0}, {"uartc", "pllP_out0", 408000000, 0}, {"uartd", "pllP_out0", 408000000, 0}, {"pllA_out", NULL, 282240000, 1}, {"pllA_out0", NULL, 11289600, 1}, {"extperiph1", "pllA_out0", 0, 1}, {"i2s0", "pllA_out0", 11289600, 0}, {"i2s1", "pllA_out0", 11289600, 0}, {"i2s2", "pllA_out0", 11289600, 0}, {"i2s3", "pllA_out0", 11289600, 0}, {"i2s4", "pllA_out0", 11289600, 0}, {"vde", "pllP_out0", 0, 0}, {"host1x", "pllP_out0", 136000000, 1}, {"sclk", "pllP_out2", 102000000, 1}, {"dvfs_soc", "pllP_out0", 51000000, 1}, {"dvfs_ref", "pllP_out0", 51000000, 1}, {"pllC_out0", NULL, 600000000, 0}, {"pllC_out1", NULL, 100000000, 0}, {"spi4", "pllP_out0", 12000000, 1}, {"tsec", "pllC3_out0", 0, 0}, {"msenc", "pllC3_out0", 0, 0}, {"pllREFE_out", NULL, 672000000, 0}, {"pc_xusb_ss", "pllU_480", 120000000, 0}, {"xusb_ss", "pc_xusb_ss", 120000000, 0}, {"pc_xusb_fs", "pllU_48", 48000000, 0}, {"xusb_hs", "pllU_60", 60000000, 0}, {"pc_xusb_falcon", "pllREFE_out", 224000000, 0}, {"xusb_core_host", "pllREFE_out", 112000000, 0}, {"sata", "pllP_out0", 102000000, 0}, {"sata_oob", "pllP_out0", 204000000, 0}, {"sata_cold", NULL, 0, 1}, {"emc", NULL, 0, 1}, {"mselect", NULL, 0, 1}, {"csite", NULL, 0, 1}, {"tsensor", "clk_m", 400000, 0}, /* tegra124 only*/ {"soc_therm", "pllP_out0", 51000000, 0}, {"cclk_g", NULL, 0, 1}, {"hda", "pllP_out0", 102000000, 0}, {"hda2codec_2x", "pllP_out0", 48000000, 0}, }; static void init_divs(struct tegra124_car_softc *sc, struct clk_div_def *clks, int nclks) { int i, rv; for (i = 0; i < nclks; i++) { rv = clknode_div_register(sc->clkdom, clks + i); if (rv != 0) panic("clk_div_register failed"); } } static void init_gates(struct tegra124_car_softc *sc, struct clk_gate_def *clks, int nclks) { int i, rv; for (i = 0; i < nclks; i++) { rv = clknode_gate_register(sc->clkdom, clks + i); if (rv != 0) 
panic("clk_gate_register failed"); } } static void init_muxes(struct tegra124_car_softc *sc, struct clk_mux_def *clks, int nclks) { int i, rv; for (i = 0; i < nclks; i++) { rv = clknode_mux_register(sc->clkdom, clks + i); if (rv != 0) panic("clk_mux_register failed"); } } static void init_fixeds(struct tegra124_car_softc *sc, struct clk_fixed_def *clks, int nclks) { int i, rv; uint32_t val; int osc_idx; CLKDEV_READ_4(sc->dev, OSC_CTRL, &val); osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT; fixed_clk_m.freq = osc_freqs[osc_idx]; if (fixed_clk_m.freq == 0) panic("Undefined input frequency"); rv = clknode_fixed_register(sc->clkdom, &fixed_clk_m); if (rv != 0) panic("clk_fixed_register failed"); val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3; fixed_osc_div_clk.div = 1 << val; rv = clknode_fixed_register(sc->clkdom, &fixed_osc_div_clk); if (rv != 0) panic("clk_fixed_register failed"); for (i = 0; i < nclks; i++) { rv = clknode_fixed_register(sc->clkdom, clks + i); if (rv != 0) panic("clk_fixed_register failed"); } } static void postinit_clock(struct tegra124_car_softc *sc) { int i; struct tegra124_init_item *tbl; struct clknode *clknode; int rv; for (i = 0; i < nitems(clk_init_table); i++) { tbl = &clk_init_table[i]; clknode = clknode_find_by_name(tbl->name); if (clknode == NULL) { device_printf(sc->dev, "Cannot find clock %s\n", tbl->name); continue; } if (tbl->parent != NULL) { rv = clknode_set_parent_by_name(clknode, tbl->parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for %s (to %s): %d\n", tbl->name, tbl->parent, rv); continue; } } if (tbl->frequency != 0) { rv = clknode_set_freq(clknode, tbl->frequency, 0 , 9999); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for %s: %d\n", tbl->name, rv); continue; } } if (tbl->enable!= 0) { rv = clknode_enable(clknode); if (rv != 0) { device_printf(sc->dev, "Cannot enable %s: %d\n", tbl->name, rv); continue; } } } } static void register_clocks(device_t dev) { struct tegra124_car_softc *sc; sc = 
device_get_softc(dev); sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) panic("clkdom == NULL"); tegra124_init_plls(sc); init_fixeds(sc, tegra124_fixed_clks, nitems(tegra124_fixed_clks)); init_muxes(sc, tegra124_mux_clks, nitems(tegra124_mux_clks)); init_divs(sc, tegra124_div_clks, nitems(tegra124_div_clks)); init_gates(sc, tegra124_gate_clks, nitems(tegra124_gate_clks)); tegra124_periph_clock(sc); tegra124_super_mux_clock(sc); clkdom_finit(sc->clkdom); clkdom_xlock(sc->clkdom); postinit_clock(sc); clkdom_unlock(sc->clkdom); if (bootverbose) clkdom_dump(sc->clkdom); } static int tegra124_car_clkdev_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct tegra124_car_softc *sc; sc = device_get_softc(dev); *val = bus_read_4(sc->mem_res, addr); return (0); } static int tegra124_car_clkdev_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct tegra124_car_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->mem_res, addr, val); return (0); } static int tegra124_car_clkdev_modify_4(device_t dev, bus_addr_t addr, uint32_t clear_mask, uint32_t set_mask) { struct tegra124_car_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = bus_read_4(sc->mem_res, addr); reg &= ~clear_mask; reg |= set_mask; bus_write_4(sc->mem_res, addr, reg); return (0); } static void tegra124_car_clkdev_device_lock(device_t dev) { struct tegra124_car_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void tegra124_car_clkdev_device_unlock(device_t dev) { struct tegra124_car_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static int tegra124_car_detach(device_t dev) { device_printf(dev, "Error: Clock driver cannot be detached\n"); return (EBUSY); } static int tegra124_car_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "Tegra Clock Driver"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int tegra124_car_attach(device_t dev) { 
struct tegra124_car_softc *sc = device_get_softc(dev); int rid, rv; sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Resource setup. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory resource\n"); rv = ENXIO; goto fail; } register_clocks(dev); hwreset_register_ofw_provider(dev); return (0); fail: if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (rv); } static int tegra124_car_hwreset_assert(device_t dev, intptr_t id, bool value) { struct tegra124_car_softc *sc = device_get_softc(dev); return (tegra124_hwreset_by_idx(sc, id, value)); } static device_method_t tegra124_car_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra124_car_probe), DEVMETHOD(device_attach, tegra124_car_attach), DEVMETHOD(device_detach, tegra124_car_detach), /* Clkdev interface*/ DEVMETHOD(clkdev_read_4, tegra124_car_clkdev_read_4), DEVMETHOD(clkdev_write_4, tegra124_car_clkdev_write_4), DEVMETHOD(clkdev_modify_4, tegra124_car_clkdev_modify_4), DEVMETHOD(clkdev_device_lock, tegra124_car_clkdev_device_lock), DEVMETHOD(clkdev_device_unlock, tegra124_car_clkdev_device_unlock), /* Reset interface */ DEVMETHOD(hwreset_assert, tegra124_car_hwreset_assert), DEVMETHOD_END }; static DEFINE_CLASS_0(car, tegra124_car_driver, tegra124_car_methods, sizeof(struct tegra124_car_softc)); EARLY_DRIVER_MODULE(tegra124_car, simplebus, tegra124_car_driver, NULL, NULL, BUS_PASS_TIMER); diff --git a/sys/arm/nvidia/tegra124/tegra124_clk_per.c b/sys/arm/nvidia/tegra124/tegra124_clk_per.c index 95af7a75b21b..90edc6d3a8ec 100644 --- a/sys/arm/nvidia/tegra124/tegra124_clk_per.c +++ b/sys/arm/nvidia/tegra124/tegra124_clk_per.c @@ -1,841 +1,841 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include +#include #include #include "tegra124_car.h" /* The TEGRA124_CLK_XUSB_GATE is missing in current * DT bindings, define it localy */ #ifdef TEGRA124_CLK_XUSB_GATE #error "TEGRA124_CLK_XUSB_GATE is now defined, revisit XUSB code!" #else #define TEGRA124_CLK_XUSB_GATE 143 #endif /* Bits in base register. 
*/ #define PERLCK_AMUX_MASK 0x0F #define PERLCK_AMUX_SHIFT 16 #define PERLCK_AMUX_DIS (1 << 20) #define PERLCK_UDIV_DIS (1 << 24) #define PERLCK_ENA_MASK (1 << 28) #define PERLCK_MUX_SHIFT 29 #define PERLCK_MUX_MASK 0x07 struct periph_def { struct clknode_init_def clkdef; uint32_t base_reg; uint32_t div_width; uint32_t div_mask; uint32_t div_f_width; uint32_t div_f_mask; uint32_t flags; }; struct pgate_def { struct clknode_init_def clkdef; uint32_t idx; uint32_t flags; }; #define PLIST(x) static const char *x[] #define GATE(_id, cname, plist, _idx) \ { \ .clkdef.id = TEGRA124_CLK_##_id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .idx = _idx, \ .flags = 0, \ } /* Sources for multiplexors. */ PLIST(mux_a_N_audio_N_p_N_clkm) = {"pllA_out0", NULL, "audio", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio0_N_p_N_clkm) = {"pllA_out0", NULL, "audio0", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio1_N_p_N_clkm) = {"pllA_out0", NULL, "audio1", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio2_N_p_N_clkm) = {"pllA_out0", NULL, "audio2", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio3_N_p_N_clkm) = {"pllA_out0", NULL, "audio3", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio4_N_p_N_clkm) = {"pllA_out0", NULL, "audio4", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_clks_p_clkm_e) = {"pllA_out0", "clk_s", "pllP_out0", "clk_m", "pllE_out0"}; PLIST(mux_a_c2_c_c3_p_N_clkm) = {"pllA_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", NULL, "clk_m"}; PLIST(mux_m_c_p_a_c2_c3) = {"pllM_out0", "pllC_out0", "pllP_out0", "pllA_out0", "pllC2_out0", "pllC3_out0"}; PLIST(mux_m_c_p_a_c2_c3_clkm) = {"pllM_out0", "pllC_out0", "pllP_out0", "pllA_out0", "pllC2_out0", "pllC3_out0", "clk_m"}; PLIST(mux_m_c_p_a_c2_c3_clkm_c4) = {"pllM_out0", "pllC_out0", "pllP_out0", "pllA_out0", "pllC2_out0", "pllC3_out0", "clk_m", "pllC4_out0"}; 
PLIST(mux_m_c_p_clkm_mud_c2_c3) = {"pllM_out0", "pllC_out0", "pllP_out0", "clk_m", "pllM_UD", "pllC2_out0", "pllC3_out0"}; PLIST(mux_m_c_p_clkm_mud_c2_c3_cud) = {"pllM_out0", "pllC_out0", "pllP_out0", "clk_m", "pllM_UD", "pllC2_out0", "pllC3_out0", "pllC_UD"}; PLIST(mux_m_c2_c_c3_p_N_a) = {"pllM_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", NULL, "pllA_out0"}; PLIST(mux_m_c2_c_c3_p_N_a_c4) = {"pllM_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", NULL, "pllA_out0", "pllC4_out0"}; PLIST(mux_p_N_c_N_N_N_clkm) = {"pllP_out0", NULL, "pllC_out0", NULL, NULL, NULL, "clk_m"}; PLIST(mux_p_N_c_N_m_N_clkm) = {"pllP_out0", NULL, "pllC_out0", NULL, "pllM_out0", NULL, "clk_m"}; PLIST(mux_p_c_c2_clkm) = {"pllP_out0", "pllC_out0", "pllC2_out0", "clk_m"}; PLIST(mux_p_c2_c_c3_m) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0"}; PLIST(mux_p_c2_c_c3_m_N_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0", NULL, "clk_m"}; PLIST(mux_p_c2_c_c3_m_e_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0", "pllE_out0", "clk_m"}; PLIST(mux_p_c2_c_c3_m_a_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0", "pllA_out0", "clk_m"}; PLIST(mux_p_c2_c_c3_m_clks_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0", "clk_s", "clk_m"}; PLIST(mux_p_c2_c_c3_clks_N_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "clk_s", NULL, "clk_m"}; PLIST(mux_p_c2_c_c3_clkm_N_clks) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "clk_m", NULL, "clk_s"}; PLIST(mux_p_clkm_clks_E) = {"pllP_out0", "clk_m", "clk_s", "pllE_out0"}; PLIST(mux_p_m_d_a_c_d2_clkm) = {"pllP_out0", "pllM_out0", "pllD_out0", "pllA_out0", "pllC_out0", "pllD2_out0", "clk_m"}; PLIST(mux_clkm_N_u48_N_p_N_u480) = {"clk_m", NULL, "pllU_48", NULL, "pllP_out0", NULL, "pllU_480"}; PLIST(mux_clkm_p_c2_c_c3_refre) = {"clk_m", "pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllREFE_out"}; 
PLIST(mux_clkm_refe_clks_u480_c_c2_c3_oscdiv) = {"clk_m", "pllREFE_out", "clk_s", "pllU_480", "pllC_out0", "pllC2_out0", "pllC3_out0", "osc_div_clk"}; PLIST(mux_sep_audio) = {"pllA_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", NULL, "clk_m", NULL, "spdif_in", "i2s0", "i2s1", "i2s2", "i2s4", "pllA_out0", "ext_vimclk"}; static uint32_t clk_enable_reg[] = { CLK_OUT_ENB_L, CLK_OUT_ENB_H, CLK_OUT_ENB_U, CLK_OUT_ENB_V, CLK_OUT_ENB_W, CLK_OUT_ENB_X, }; static uint32_t clk_reset_reg[] = { RST_DEVICES_L, RST_DEVICES_H, RST_DEVICES_U, RST_DEVICES_V, RST_DEVICES_W, RST_DEVICES_X, }; #define L(n) ((0 * 32) + (n)) #define H(n) ((1 * 32) + (n)) #define U(n) ((2 * 32) + (n)) #define V(n) ((3 * 32) + (n)) #define W(n) ((4 * 32) + (n)) #define X(n) ((5 * 32) + (n)) static struct pgate_def pgate_def[] = { /* bank L -> 0-31 */ /* GATE(CPU, "cpu", "clk_m", L(0)), */ GATE(ISPB, "ispb", "clk_m", L(3)), GATE(RTC, "rtc", "clk_s", L(4)), GATE(TIMER, "timer", "clk_m", L(5)), GATE(UARTA, "uarta", "pc_uarta" , L(6)), GATE(UARTB, "uartb", "pc_uartb", L(7)), GATE(VFIR, "vfir", "pc_vfir", L(7)), /* GATE(GPIO, "gpio", "clk_m", L(8)), */ GATE(SDMMC2, "sdmmc2", "pc_sdmmc2", L(9)), GATE(SPDIF_OUT, "spdif_out", "pc_spdif_out", L(10)), GATE(SPDIF_IN, "spdif_in", "pc_spdif_in", L(10)), GATE(I2S1, "i2s1", "pc_i2s1", L(11)), GATE(I2C1, "i2c1", "pc_i2c1", L(12)), GATE(SDMMC1, "sdmmc1", "pc_sdmmc1", L(14)), GATE(SDMMC4, "sdmmc4", "pc_sdmmc4", L(15)), GATE(PWM, "pwm", "pc_pwm", L(17)), GATE(I2S2, "i2s2", "pc_i2s2", L(18)), GATE(VI, "vi", "pc_vi", L(20)), GATE(USBD, "usbd", "clk_m", L(22)), GATE(ISP, "isp", "pc_isp", L(23)), GATE(DISP2, "disp2", "pc_disp2", L(26)), GATE(DISP1, "disp1", "pc_disp1", L(27)), GATE(HOST1X, "host1x", "pc_host1x", L(28)), GATE(VCP, "vcp", "clk_m", L(29)), GATE(I2S0, "i2s0", "pc_i2s0", L(30)), /* GATE(CACHE2, "ccache2", "clk_m", L(31)), */ /* bank H -> 32-63 */ GATE(MC, "mem", "clk_m", H(0)), /* GATE(AHBDMA, "ahbdma", "clk_m", H(1)), */ GATE(APBDMA, "apbdma", "clk_m", 
H(2)), GATE(KBC, "kbc", "clk_s", H(4)), /* GATE(STAT_MON, "stat_mon", "clk_s", H(5)), */ /* GATE(PMC, "pmc", "clk_s", H(6)), */ GATE(FUSE, "fuse", "clk_m", H(7)), GATE(KFUSE, "kfuse", "clk_m", H(8)), GATE(SBC1, "spi1", "pc_spi1", H(9)), GATE(NOR, "snor", "pc_snor", H(10)), /* GATE(JTAG2TBC, "jtag2tbc", "clk_m", H(11)), */ GATE(SBC2, "spi2", "pc_spi2", H(12)), GATE(SBC3, "spi3", "pc_spi3", H(14)), GATE(I2C5, "i2c5", "pc_i2c5", H(15)), GATE(DSIA, "dsia", "dsia_mux", H(16)), GATE(MIPI, "hsi", "pc_hsi", H(18)), GATE(HDMI, "hdmi", "pc_hdmi", H(19)), GATE(CSI, "csi", "pllP_out3", H(20)), GATE(I2C2, "i2c2", "pc_i2c2", H(22)), GATE(UARTC, "uartc", "pc_uartc", H(23)), GATE(MIPI_CAL, "mipi_cal", "clk_m", H(24)), GATE(EMC, "emc", "pc_emc_2x", H(25)), GATE(USB2, "usb2", "clk_m", H(26)), GATE(USB3, "usb3", "clk_m", H(27)), GATE(VDE, "vde", "pc_vde", H(29)), GATE(BSEA, "bsea", "clk_m", H(30)), GATE(BSEV, "bsev", "clk_m", H(31)), /* bank U -> 64-95 */ GATE(UARTD, "uartd", "pc_uartd", U(1)), GATE(I2C3, "i2c3", "pc_i2c3", U(3)), GATE(SBC4, "spi4", "pc_spi4", U(4)), GATE(SDMMC3, "sdmmc3", "pc_sdmmc3", U(5)), GATE(PCIE, "pcie", "clk_m", U(6)), GATE(OWR, "owr", "pc_owr", U(7)), GATE(AFI, "afi", "clk_m", U(8)), GATE(CSITE, "csite", "pc_csite", U(9)), /* GATE(AVPUCQ, "avpucq", clk_m, U(11)), */ GATE(TRACE, "traceclkin", "pc_traceclkin", U(13)), GATE(SOC_THERM, "soc_therm", "pc_soc_therm", U(14)), GATE(DTV, "dtv", "clk_m", U(15)), GATE(I2CSLOW, "i2c_slow", "pc_i2c_slow", U(17)), GATE(DSIB, "dsib", "dsib_mux", U(18)), GATE(TSEC, "tsec", "pc_tsec", U(19)), /* GATE(IRAMA, "irama", "clk_m", U(20)), */ /* GATE(IRAMB, "iramb", "clk_m", U(21)), */ /* GATE(IRAMC, "iramc", "clk_m", U(22)), */ /* GATE(IRAMD, "iramd", "clk_m", U(23)), */ /* GATE(CRAM2, "cram2", "clk_m", U(24)), */ GATE(XUSB_HOST, "xusb_core_host", "pc_xusb_core_host", U(25)), /* GATE(M_DOUBLER, "m_doubler", "clk_m", U(26)), */ GATE(MSENC, "msenc", "pc_msenc", U(27)), GATE(CSUS, "sus_out", "clk_m", U(28)), /* GATE(DEVD2_OUT, 
"devd2_out", "clk_m", U(29)), */ /* GATE(DEVD1_OUT, "devd1_out", "clk_m", U(30)), */ GATE(XUSB_DEV, "xusb_core_dev", "pc_xusb_core_dev", U(31)), /* bank V -> 96-127 */ /* GATE(CPUG, "cpug", "clk_m", V(0)), */ /* GATE(CPULP, "cpuLP", "clk_m", V(1)), */ GATE(MSELECT, "mselect", "pc_mselect", V(3)), GATE(TSENSOR, "tsensor", "pc_tsensor", V(4)), GATE(I2S3, "i2s3", "pc_i2s3", V(5)), GATE(I2S4, "i2s4", "pc_i2s4", V(6)), GATE(I2C4, "i2c4", "pc_i2c4", V(7)), GATE(SBC5, "spi5", "pc_spi5", V(8)), GATE(SBC6, "spi6", "pc_spi6", V(9)), GATE(D_AUDIO, "audio", "pc_audio", V(10)), GATE(APBIF, "apbif", "clk_m", V(11)), GATE(DAM0, "dam0", "pc_dam0", V(12)), GATE(DAM1, "dam1", "pc_dam1", V(13)), GATE(DAM2, "dam2", "pc_dam2", V(14)), GATE(HDA2CODEC_2X, "hda2codec_2x", "pc_hda2codec_2x", V(15)), /* GATE(ATOMICS, "atomics", "clk_m", V(16)), */ /* GATE(SPDIF_DOUBLER, "spdif_doubler", "clk_m", V(22)), */ GATE(ACTMON, "actmon", "pc_actmon", V(23)), GATE(EXTERN1, "extperiph1", "pc_extperiph1", V(24)), GATE(EXTERN2, "extperiph2", "pc_extperiph2", V(25)), GATE(EXTERN3, "extperiph3", "pc_extperiph3", V(26)), GATE(SATA_OOB, "sata_oob", "pc_sata_oob", V(27)), GATE(SATA, "sata", "pc_sata", V(28)), GATE(HDA, "hda", "pc_hda", V(29)), /* bank W -> 128-159*/ GATE(HDA2HDMI, "hda2hdmi", "clk_m", W(0)), GATE(SATA_COLD, "sata_cold", "clk_m", W(1)), /* Reset only */ /* GATE(PCIERX0, "pcierx0", "clk_m", W(2)), */ /* GATE(PCIERX1, "pcierx1", "clk_m", W(3)), */ /* GATE(PCIERX2, "pcierx2", "clk_m", W(4)), */ /* GATE(PCIERX3, "pcierx3", "clk_m", W(5)), */ /* GATE(PCIERX4, "pcierx4", "clk_m", W(6)), */ /* GATE(PCIERX5, "pcierx5", "clk_m", W(7)), */ /* GATE(CEC, "cec", "clk_m", W(8)), */ /* GATE(PCIE2_IOBIST, "pcie2_iobist", "clk_m", W(9)), */ /* GATE(EMC_IOBIST, "emc_iobist", "clk_m", W(10)), */ /* GATE(HDMI_IOBIST, "hdmi_iobist", "clk_m", W(11)), */ /* GATE(SATA_IOBIST, "sata_iobist", "clk_m", W(12)), */ /* GATE(MIPI_IOBIST, "mipi_iobist", "clk_m", W(13)), */ GATE(XUSB_GATE, "xusb_gate", "clk_m", W(15)), 
GATE(CILAB, "cilab", "pc_cilab", W(16)), GATE(CILCD, "cilcd", "pc_cilcd", W(17)), GATE(CILE, "cile", "pc_cile", W(18)), GATE(DSIALP, "dsia_lp", "pc_dsia_lp", W(19)), GATE(DSIBLP, "dsib_lp", "pc_dsib_lp", W(20)), GATE(ENTROPY, "entropy", "pc_entropy", W(21)), GATE(AMX, "amx", "pc_amx", W(25)), GATE(ADX, "adx", "pc_adx", W(26)), GATE(DFLL_REF, "dvfs_ref", "pc_dvfs_ref", W(27)), GATE(DFLL_SOC, "dvfs_soc", "pc_dvfs_soc", W(27)), GATE(XUSB_SS, "xusb_ss", "xusb_ss_mux", W(28)), /* GATE(EMC_LATENCY, "emc_latency", "pc_emc_latency", W(29)), */ /* bank X -> 160-191*/ /* GATE(SPARE, "spare", "clk_m", X(0)), */ /* GATE(CAM_MCLK, "CAM_MCLK", "clk_m", X(4)), */ /* GATE(CAM_MCLK2, "CAM_MCLK2", "clk_m", X(5)), */ GATE(I2C6, "i2c6", "pc_i2c6", X(6)), GATE(VIM2_CLK, "vim2_clk", "clk_m", X(11)), /* GATE(EMC_DLL, "emc_dll", "pc_emc_dll", X(14)), */ GATE(HDMI_AUDIO, "hdmi_audio", "pc_hdmi_audio", X(16)), GATE(CLK72MHZ, "clk72mhz", "pc_clk72mhz", X(17)), GATE(VIC03, "vic", "pc_vic", X(18)), GATE(ADX1, "adx1", "pc_adx1", X(20)), GATE(DPAUX, "dpaux", "clk_m", X(21)), GATE(SOR0_LVDS, "sor0", "pc_sor0", X(22)), GATE(GPU, "gpu", "osc_div_clk", X(24)), GATE(AMX1, "amx1", "pc_amx1", X(26)), }; /* Peripheral clock clock */ #define DCF_HAVE_MUX 0x0100 /* Block with multipexor */ #define DCF_HAVE_ENA 0x0200 /* Block with enable bit */ #define DCF_HAVE_DIV 0x0400 /* Block with divider */ /* Mark block with additional bits / functionality. 
*/ #define DCF_IS_MASK 0x00FF #define DCF_IS_UART 0x0001 #define DCF_IS_VI 0x0002 #define DCF_IS_HOST1X 0x0003 #define DCF_IS_XUSB_SS 0x0004 #define DCF_IS_EMC_DLL 0x0005 #define DCF_IS_SATA 0x0006 #define DCF_IS_VIC 0x0007 #define DCF_IS_AUDIO 0x0008 #define DCF_IS_SOR0 0x0009 #define DCF_IS_EMC 0x000A /* Basic pheripheral clock */ #define PER_CLK(_id, cn, pl, r, diw, fiw, f) \ { \ .clkdef.id = _id, \ .clkdef.name = cn, \ .clkdef.parent_names = pl, \ .clkdef.parent_cnt = nitems(pl), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_reg = r, \ .div_width = diw, \ .div_f_width = fiw, \ .flags = f, \ } /* Mux with fractional 8.1 divider. */ #define CLK_8_1(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 8, 1, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux with fractional 16.1 divider. */ #define CLK16_1(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 16, 1, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux with integer 16bits divider. */ #define CLK16_0(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 16, 0, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux wihout divider. 
*/ #define CLK_0_0(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 0, 0, (f) | DCF_HAVE_MUX) static struct periph_def periph_def[] = { CLK_8_1(0, "pc_i2s1", mux_a_N_audio1_N_p_N_clkm, CLK_SOURCE_I2S1, DCF_HAVE_ENA), CLK_8_1(0, "pc_i2s2", mux_a_N_audio2_N_p_N_clkm, CLK_SOURCE_I2S2, DCF_HAVE_ENA), CLK_8_1(0, "pc_spdif_out", mux_a_N_audio_N_p_N_clkm, CLK_SOURCE_SPDIF_OUT, 0), CLK_8_1(0, "pc_spdif_in", mux_p_c2_c_c3_m, CLK_SOURCE_SPDIF_IN, 0), CLK_8_1(0, "pc_pwm", mux_p_c2_c_c3_clks_N_clkm, CLK_SOURCE_PWM, 0), CLK_8_1(0, "pc_spi2", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI2, 0), CLK_8_1(0, "pc_spi3", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI3, 0), CLK16_0(0, "pc_i2c5", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C5, 0), CLK16_0(0, "pc_i2c1", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C1, 0), CLK_8_1(0, "pc_spi1", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI1, 0), CLK_0_0(0, "pc_disp1", mux_p_m_d_a_c_d2_clkm, CLK_SOURCE_DISP1, 0), CLK_0_0(0, "pc_disp2", mux_p_m_d_a_c_d2_clkm, CLK_SOURCE_DISP2, 0), CLK_8_1(0, "pc_isp", mux_m_c_p_a_c2_c3_clkm_c4, CLK_SOURCE_ISP, 0), CLK_8_1(0, "pc_vi", mux_m_c2_c_c3_p_N_a_c4, CLK_SOURCE_VI, DCF_IS_VI), CLK_8_1(0, "pc_sdmmc1", mux_p_c2_c_c3_m_e_clkm, CLK_SOURCE_SDMMC1, 0), CLK_8_1(0, "pc_sdmmc2", mux_p_c2_c_c3_m_e_clkm, CLK_SOURCE_SDMMC2, 0), CLK_8_1(0, "pc_sdmmc4", mux_p_c2_c_c3_m_e_clkm, CLK_SOURCE_SDMMC4, 0), CLK_8_1(0, "pc_vfir", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_VFIR, 0), CLK_8_1(0, "pc_hsi", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_HSI, 0), CLK16_1(0, "pc_uarta", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_UARTA, DCF_IS_UART), CLK16_1(0, "pc_uartb", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_UARTB, DCF_IS_UART), CLK_8_1(0, "pc_host1x", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_HOST1X, DCF_IS_HOST1X), CLK_8_1(0, "pc_hdmi", mux_p_m_d_a_c_d2_clkm, CLK_SOURCE_HDMI, 0), CLK16_0(0, "pc_i2c2", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C2, 0), CLK_8_1(0, "pc_emc_2x", mux_m_c_p_clkm_mud_c2_c3_cud, CLK_SOURCE_EMC, DCF_IS_EMC), CLK16_1(0, "pc_uartc", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_UARTC, DCF_IS_UART), 
CLK_8_1(0, "pc_vi_sensor", mux_m_c2_c_c3_p_N_a, CLK_SOURCE_VI_SENSOR, 0), CLK_8_1(0, "pc_spi4", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI4, 0), CLK16_0(0, "pc_i2c3", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C3, 0), CLK_8_1(0, "pc_sdmmc3", mux_p_c2_c_c3_m_e_clkm, CLK_SOURCE_SDMMC3, 0), CLK16_1(0, "pc_uartd", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_UARTD, DCF_IS_UART), CLK_8_1(0, "pc_vde", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_VDE, 0), CLK_8_1(0, "pc_owr", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_OWR, 0), CLK_8_1(0, "pc_snor", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_NOR, 0), CLK_8_1(0, "pc_csite", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_CSITE, 0), CLK_8_1(0, "pc_i2s0", mux_a_N_audio0_N_p_N_clkm, CLK_SOURCE_I2S0, 0), /* DTV xxx */ CLK_8_1(0, "pc_msenc", mux_m_c2_c_c3_p_N_a, CLK_SOURCE_MSENC, 0), CLK_8_1(0, "pc_tsec", mux_p_c2_c_c3_m_a_clkm, CLK_SOURCE_TSEC, 0), /* SPARE2 */ CLK_8_1(0, "pc_mselect", mux_p_c2_c_c3_m_clks_clkm, CLK_SOURCE_MSELECT, 0), CLK_8_1(0, "pc_tsensor", mux_p_c2_c_c3_clkm_N_clks, CLK_SOURCE_TSENSOR, 0), CLK_8_1(0, "pc_i2s3", mux_a_N_audio3_N_p_N_clkm, CLK_SOURCE_I2S3, DCF_HAVE_ENA), CLK_8_1(0, "pc_i2s4", mux_a_N_audio4_N_p_N_clkm, CLK_SOURCE_I2S4, DCF_HAVE_ENA), CLK16_0(0, "pc_i2c4", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C4, 0), CLK_8_1(0, "pc_spi5", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI5, 0), CLK_8_1(0, "pc_spi6", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_SPI6, 0), CLK_8_1(0, "pc_audio", mux_sep_audio, CLK_SOURCE_AUDIO, DCF_IS_AUDIO), CLK_8_1(0, "pc_dam0", mux_sep_audio, CLK_SOURCE_DAM0, DCF_IS_AUDIO), CLK_8_1(0, "pc_dam1", mux_sep_audio, CLK_SOURCE_DAM1, DCF_IS_AUDIO), CLK_8_1(0, "pc_dam2", mux_sep_audio, CLK_SOURCE_DAM2, DCF_IS_AUDIO), CLK_8_1(0, "pc_hda2codec_2x", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_HDA2CODEC_2X, 0), CLK_8_1(0, "pc_actmon", mux_p_c2_c_c3_clks_N_clkm, CLK_SOURCE_ACTMON, 0), CLK_8_1(0, "pc_extperiph1", mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH1, 0), CLK_8_1(0, "pc_extperiph2", mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH2, 0), CLK_8_1(0, "pc_extperiph3", 
mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH3, 0), CLK_8_1(0, "pc_i2c_slow", mux_p_c2_c_c3_clks_N_clkm, CLK_SOURCE_I2C_SLOW, 0), /* SYS */ CLK_8_1(0, "pc_sor0", mux_p_m_d_a_c_d2_clkm, CLK_SOURCE_SOR0, DCF_IS_SOR0), CLK_8_1(0, "pc_sata_oob", mux_p_N_c_N_m_N_clkm, CLK_SOURCE_SATA_OOB, 0), CLK_8_1(0, "pc_sata", mux_p_N_c_N_m_N_clkm, CLK_SOURCE_SATA, DCF_IS_SATA), CLK_8_1(0, "pc_hda", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_HDA, 0), CLK_8_1(TEGRA124_CLK_XUSB_HOST_SRC, "pc_xusb_core_host", mux_clkm_p_c2_c_c3_refre, CLK_SOURCE_XUSB_CORE_HOST, 0), CLK_8_1(TEGRA124_CLK_XUSB_FALCON_SRC, "pc_xusb_falcon", mux_clkm_p_c2_c_c3_refre, CLK_SOURCE_XUSB_FALCON, 0), CLK_8_1(TEGRA124_CLK_XUSB_FS_SRC, "pc_xusb_fs", mux_clkm_N_u48_N_p_N_u480, CLK_SOURCE_XUSB_FS, 0), CLK_8_1(TEGRA124_CLK_XUSB_DEV_SRC, "pc_xusb_core_dev", mux_clkm_p_c2_c_c3_refre, CLK_SOURCE_XUSB_CORE_DEV, 0), CLK_8_1(TEGRA124_CLK_XUSB_SS_SRC, "pc_xusb_ss", mux_clkm_refe_clks_u480_c_c2_c3_oscdiv, CLK_SOURCE_XUSB_SS, DCF_IS_XUSB_SS), CLK_8_1(0, "pc_cilab", mux_p_N_c_N_N_N_clkm, CLK_SOURCE_CILAB, 0), CLK_8_1(0, "pc_cilcd", mux_p_N_c_N_N_N_clkm, CLK_SOURCE_CILCD, 0), CLK_8_1(0, "pc_cile", mux_p_N_c_N_N_N_clkm, CLK_SOURCE_CILE, 0), CLK_8_1(0, "pc_dsia_lp", mux_p_N_c_N_N_N_clkm, CLK_SOURCE_DSIA_LP, 0), CLK_8_1(0, "pc_dsib_lp", mux_p_N_c_N_N_N_clkm, CLK_SOURCE_DSIB_LP, 0), CLK_8_1(0, "pc_entropy", mux_p_clkm_clks_E, CLK_SOURCE_ENTROPY, 0), CLK_8_1(0, "pc_dvfs_ref", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_DVFS_REF, DCF_HAVE_ENA), CLK_8_1(0, "pc_dvfs_soc", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_DVFS_SOC, DCF_HAVE_ENA), CLK_8_1(0, "pc_traceclkin", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_TRACECLKIN, 0), CLK_8_1(0, "pc_adx", mux_a_c2_c_c3_p_N_clkm, CLK_SOURCE_ADX, DCF_HAVE_ENA), CLK_8_1(0, "pc_amx", mux_a_c2_c_c3_p_N_clkm, CLK_SOURCE_AMX, DCF_HAVE_ENA), CLK_8_1(0, "pc_emc_latency", mux_m_c_p_clkm_mud_c2_c3, CLK_SOURCE_EMC_LATENCY, 0), CLK_8_1(0, "pc_soc_therm", mux_m_c_p_a_c2_c3, CLK_SOURCE_SOC_THERM, 0), CLK_8_1(0, "pc_vi_sensor2", mux_m_c2_c_c3_p_N_a, 
CLK_SOURCE_VI_SENSOR2, 0), CLK16_0(0, "pc_i2c6", mux_p_c2_c_c3_m_N_clkm, CLK_SOURCE_I2C6, 0), CLK_8_1(0, "pc_emc_dll", mux_m_c_p_clkm_mud_c2_c3, CLK_SOURCE_EMC_DLL, DCF_IS_EMC_DLL), CLK_8_1(0, "pc_hdmi_audio", mux_p_c_c2_clkm, CLK_SOURCE_HDMI_AUDIO, 0), CLK_8_1(0, "pc_clk72mhz", mux_p_c_c2_clkm, CLK_SOURCE_CLK72MHZ, 0), CLK_8_1(0, "pc_adx1", mux_a_c2_c_c3_p_N_clkm, CLK_SOURCE_ADX1, DCF_HAVE_ENA), CLK_8_1(0, "pc_amx1", mux_a_c2_c_c3_p_N_clkm, CLK_SOURCE_AMX1, DCF_HAVE_ENA), CLK_8_1(0, "pc_vic", mux_m_c_p_a_c2_c3_clkm, CLK_SOURCE_VIC, DCF_IS_VIC), }; static int periph_init(struct clknode *clk, device_t dev); static int periph_recalc(struct clknode *clk, uint64_t *freq); static int periph_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop); static int periph_set_mux(struct clknode *clk, int idx); struct periph_sc { device_t clkdev; uint32_t base_reg; uint32_t div_shift; uint32_t div_width; uint32_t div_mask; uint32_t div_f_width; uint32_t div_f_mask; uint32_t flags; uint32_t divider; int mux; }; static clknode_method_t periph_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, periph_init), CLKNODEMETHOD(clknode_recalc_freq, periph_recalc), CLKNODEMETHOD(clknode_set_freq, periph_set_freq), CLKNODEMETHOD(clknode_set_mux, periph_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra124_periph, tegra124_periph_class, periph_methods, sizeof(struct periph_sc), clknode_class); static int periph_init(struct clknode *clk, device_t dev) { struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); if (sc->flags & DCF_HAVE_ENA) MD4(sc, sc->base_reg, PERLCK_ENA_MASK, PERLCK_ENA_MASK); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); /* Stnadard mux. 
*/ if (sc->flags & DCF_HAVE_MUX) sc->mux = (reg >> PERLCK_MUX_SHIFT) & PERLCK_MUX_MASK; else sc->mux = 0; if (sc->flags & DCF_HAVE_DIV) sc->divider = (reg & sc->div_mask) + 2; else sc->divider = 1; if ((sc->flags & DCF_IS_MASK) == DCF_IS_UART) { if (!(reg & PERLCK_UDIV_DIS)) sc->divider = 2; } /* AUDIO MUX */ if ((sc->flags & DCF_IS_MASK) == DCF_IS_AUDIO) { if (!(reg & PERLCK_AMUX_DIS) && (sc->mux == 7)) { sc->mux = 8 + ((reg >> PERLCK_AMUX_SHIFT) & PERLCK_MUX_MASK); } } clknode_init_parent_idx(clk, sc->mux); return(0); } static int periph_set_mux(struct clknode *clk, int idx) { struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); if (!(sc->flags & DCF_HAVE_MUX)) return (ENXIO); sc->mux = idx; DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); reg &= ~(PERLCK_MUX_MASK << PERLCK_MUX_SHIFT); if ((sc->flags & DCF_IS_MASK) == DCF_IS_AUDIO) { reg &= ~PERLCK_AMUX_DIS; reg &= ~(PERLCK_MUX_MASK << PERLCK_AMUX_SHIFT); if (idx <= 7) { reg |= idx << PERLCK_MUX_SHIFT; } else { reg |= 7 << PERLCK_MUX_SHIFT; reg |= (idx - 8) << PERLCK_AMUX_SHIFT; } } else { reg |= idx << PERLCK_MUX_SHIFT; } WR4(sc, sc->base_reg, reg); DEVICE_UNLOCK(sc); return(0); } static int periph_recalc(struct clknode *clk, uint64_t *freq) { struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); if (sc->flags & DCF_HAVE_DIV) { DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); *freq = (*freq << sc->div_f_width) / sc->divider; } return (0); } static int periph_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct periph_sc *sc; uint64_t tmp, divider; sc = clknode_get_softc(clk); if (!(sc->flags & DCF_HAVE_DIV)) { *stop = 0; return (0); } tmp = fin << sc->div_f_width; divider = tmp / *fout; if ((tmp % *fout) != 0) divider++; if (divider < (1 << sc->div_f_width)) divider = 1 << (sc->div_f_width - 1); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (tmp / divider))) return (ERANGE); } else { 
DEVICE_LOCK(sc); MD4(sc, sc->base_reg, sc->div_mask, (divider - (1 << sc->div_f_width))); DEVICE_UNLOCK(sc); sc->divider = divider; } *fout = tmp / divider; *stop = 1; return (0); } static int periph_register(struct clkdom *clkdom, struct periph_def *clkdef) { struct clknode *clk; struct periph_sc *sc; clk = clknode_create(clkdom, &tegra124_periph_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->base_reg = clkdef->base_reg; sc->div_width = clkdef->div_width; sc->div_mask = (1 <div_width) - 1; sc->div_f_width = clkdef->div_f_width; sc->div_f_mask = (1 <div_f_width) - 1; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } /* -------------------------------------------------------------------------- */ static int pgate_init(struct clknode *clk, device_t dev); static int pgate_set_gate(struct clknode *clk, bool enable); static int pgate_get_gate(struct clknode *clk, bool *enableD); struct pgate_sc { device_t clkdev; uint32_t idx; uint32_t flags; uint32_t enabled; }; static clknode_method_t pgate_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, pgate_init), CLKNODEMETHOD(clknode_set_gate, pgate_set_gate), CLKNODEMETHOD(clknode_get_gate, pgate_get_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra124_pgate, tegra124_pgate_class, pgate_methods, sizeof(struct pgate_sc), clknode_class); static uint32_t get_enable_reg(int idx) { KASSERT(idx / 32 < nitems(clk_enable_reg), ("Invalid clock index for enable: %d", idx)); return (clk_enable_reg[idx / 32]); } static uint32_t get_reset_reg(int idx) { KASSERT(idx / 32 < nitems(clk_reset_reg), ("Invalid clock index for reset: %d", idx)); return (clk_reset_reg[idx / 32]); } static int pgate_init(struct clknode *clk, device_t dev) { struct pgate_sc *sc; uint32_t ena_reg, rst_reg, mask; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); DEVICE_LOCK(sc); RD4(sc, get_enable_reg(sc->idx), &ena_reg); RD4(sc, 
get_reset_reg(sc->idx), &rst_reg); DEVICE_UNLOCK(sc); sc->enabled = ena_reg & mask ? 1 : 0; clknode_init_parent_idx(clk, 0); return(0); } static int pgate_set_gate(struct clknode *clk, bool enable) { struct pgate_sc *sc; uint32_t reg, mask, base_reg; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); sc->enabled = enable; base_reg = get_enable_reg(sc->idx); DEVICE_LOCK(sc); MD4(sc, base_reg, mask, enable ? mask : 0); RD4(sc, base_reg, ®); DEVICE_UNLOCK(sc); DELAY(2); return(0); } static int pgate_get_gate(struct clknode *clk, bool *enabled) { struct pgate_sc *sc; uint32_t reg, mask, base_reg; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); base_reg = get_enable_reg(sc->idx); DEVICE_LOCK(sc); RD4(sc, base_reg, ®); DEVICE_UNLOCK(sc); *enabled = reg & mask ? true: false; return(0); } int tegra124_hwreset_by_idx(struct tegra124_car_softc *sc, intptr_t idx, bool reset) { uint32_t reg, mask, reset_reg; mask = 1 << (idx % 32); reset_reg = get_reset_reg(idx); CLKDEV_DEVICE_LOCK(sc->dev); CLKDEV_MODIFY_4(sc->dev, reset_reg, mask, reset ? 
mask : 0); CLKDEV_READ_4(sc->dev, reset_reg, ®); CLKDEV_DEVICE_UNLOCK(sc->dev); return(0); } static int pgate_register(struct clkdom *clkdom, struct pgate_def *clkdef) { struct clknode *clk; struct pgate_sc *sc; clk = clknode_create(clkdom, &tegra124_pgate_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->idx = clkdef->idx; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } void tegra124_periph_clock(struct tegra124_car_softc *sc) { int i, rv; for (i = 0; i < nitems(periph_def); i++) { rv = periph_register(sc->clkdom, &periph_def[i]); if (rv != 0) panic("tegra124_periph_register failed"); } for (i = 0; i < nitems(pgate_def); i++) { rv = pgate_register(sc->clkdom, &pgate_def[i]); if (rv != 0) panic("tegra124_pgate_register failed"); } } diff --git a/sys/arm/nvidia/tegra124/tegra124_clk_pll.c b/sys/arm/nvidia/tegra124/tegra124_clk_pll.c index 524b31d299cd..fb2480436d78 100644 --- a/sys/arm/nvidia/tegra124/tegra124_clk_pll.c +++ b/sys/arm/nvidia/tegra124/tegra124_clk_pll.c @@ -1,1150 +1,1150 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include +#include #include #include "tegra124_car.h" /* #define TEGRA_PLL_DEBUG */ #ifdef TEGRA_PLL_DEBUG #define dprintf(...) printf(__VA_ARGS__) #else #define dprintf(...) #endif /* All PLLs. */ enum pll_type { PLL_M, PLL_X, PLL_C, PLL_C2, PLL_C3, PLL_C4, PLL_P, PLL_A, PLL_U, PLL_D, PLL_D2, PLL_DP, PLL_E, PLL_REFE}; /* Common base register bits. */ #define PLL_BASE_BYPASS (1U << 31) #define PLL_BASE_ENABLE (1 << 30) #define PLL_BASE_REFDISABLE (1 << 29) #define PLL_BASE_LOCK (1 << 27) #define PLL_BASE_DIVM_SHIFT 0 #define PLL_BASE_DIVN_SHIFT 8 #define PLLRE_MISC_LOCK (1 << 24) #define PLL_MISC_LOCK_ENABLE (1 << 18) #define PLLC_MISC_LOCK_ENABLE (1 << 24) #define PLLDU_MISC_LOCK_ENABLE (1 << 22) #define PLLRE_MISC_LOCK_ENABLE (1 << 30) #define PLLSS_MISC_LOCK_ENABLE (1 << 30) #define PLLC_IDDQ_BIT 26 #define PLLX_IDDQ_BIT 3 #define PLLRE_IDDQ_BIT 16 #define PLLSS_IDDQ_BIT 19 #define PLL_LOCK_TIMEOUT 5000 /* Post divider <-> register value mapping. */ struct pdiv_table { uint32_t divider; /* real divider */ uint32_t value; /* register value */ }; /* Bits definition of M, N and P fields. 
*/ struct mnp_bits { uint32_t m_width; uint32_t n_width; uint32_t p_width; uint32_t p_shift; }; struct clk_pll_def { struct clknode_init_def clkdef; enum pll_type type; uint32_t base_reg; uint32_t misc_reg; uint32_t lock_mask; uint32_t lock_enable; uint32_t iddq_reg; uint32_t iddq_mask; uint32_t flags; struct pdiv_table *pdiv_table; struct mnp_bits mnp_bits; }; #define PLL(_id, cname, pname) \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS /* Tegra K1 PLLs PLLM: Clock source for EMC 2x clock PLLX: Clock source for the fast CPU cluster and the shadow CPU PLLC: Clock source for general use PLLC2: Clock source for engine scaling PLLC3: Clock source for engine scaling PLLC4: Clock source for ISP/VI units PLLP: Clock source for most peripherals PLLA: Audio clock sources: (11.2896 MHz, 12.288 MHz, 24.576 MHz) PLLU: Clock source for USB PHY, provides 12/60/480 MHz PLLD: Clock sources for the DSI and display subsystem PLLD2: Clock sources for the DSI and display subsystem refPLLe: PLLE: generate the 100 MHz reference clock for USB 3.0 (spread spectrum) PLLDP: Clock source for eDP/LVDS (spread spectrum) DFLLCPU: DFLL clock source for the fast CPU cluster GPCPLL: Clock source for the GPU */ static struct pdiv_table pllm_map[] = { {1, 0}, {2, 1}, {0, 0} }; static struct pdiv_table pllxc_map[] = { { 1, 0}, { 2, 1}, { 3, 2}, { 4, 3}, { 5, 4}, { 6, 5}, { 8, 6}, {10, 7}, {12, 8}, {16, 9}, {12, 10}, {16, 11}, {20, 12}, {24, 13}, {32, 14}, { 0, 0} }; static struct pdiv_table pllc_map[] = { { 1, 0}, { 2, 1}, { 3, 2}, { 4, 3}, { 6, 4}, { 8, 5}, {12, 6}, {16, 7}, { 0, 0} }; static struct pdiv_table pll12g_ssd_esd_map[] = { { 1, 0}, { 2, 1}, { 3, 2}, { 4, 3}, { 5, 4}, { 6, 5}, { 8, 6}, {10, 7}, {12, 8}, {16, 9}, {12, 10}, {16, 11}, {20, 12}, {24, 13}, {32, 14}, { 0, 0} }; static struct pdiv_table pllu_map[] = { {1, 1}, {2, 0}, {0, 0} }; static struct pdiv_table pllrefe_map[] 
= { {1, 0}, {2, 1}, {3, 2}, {4, 3}, {5, 4}, {6, 5}, {0, 0}, }; static struct clk_pll_def pll_clks[] = { /* PLLM: 880 MHz Clock source for EMC 2x clock */ { PLL(TEGRA124_CLK_PLL_M, "pllM_out0", "osc_div_clk"), .type = PLL_M, .base_reg = PLLM_BASE, .misc_reg = PLLM_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .pdiv_table = pllm_map, .mnp_bits = {8, 8, 1, 20}, }, /* PLLX: 1GHz Clock source for the fast CPU cluster and the shadow CPU */ { PLL(TEGRA124_CLK_PLL_X, "pllX_out", "osc_div_clk"), .type = PLL_X, .base_reg = PLLX_BASE, .misc_reg = PLLX_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .iddq_reg = PLLX_MISC3, .iddq_mask = 1 << PLLX_IDDQ_BIT, .pdiv_table = pllxc_map, .mnp_bits = {8, 8, 4, 20}, }, /* PLLC: 600 MHz Clock source for general use */ { PLL(TEGRA124_CLK_PLL_C, "pllC_out0", "osc_div_clk"), .type = PLL_C, .base_reg = PLLC_BASE, .misc_reg = PLLC_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLLC_MISC_LOCK_ENABLE, .iddq_reg = PLLC_MISC, .iddq_mask = 1 << PLLC_IDDQ_BIT, .pdiv_table = pllc_map, .mnp_bits = {8, 8, 4, 20}, }, /* PLLC2: 600 MHz Clock source for engine scaling */ { PLL(TEGRA124_CLK_PLL_C2, "pllC2_out0", "osc_div_clk"), .type = PLL_C2, .base_reg = PLLC2_BASE, .misc_reg = PLLC2_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .pdiv_table = pllc_map, .mnp_bits = {2, 8, 3, 20}, }, /* PLLC3: 600 MHz Clock source for engine scaling */ { PLL(TEGRA124_CLK_PLL_C3, "pllC3_out0", "osc_div_clk"), .type = PLL_C3, .base_reg = PLLC3_BASE, .misc_reg = PLLC3_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .pdiv_table = pllc_map, .mnp_bits = {2, 8, 3, 20}, }, /* PLLC4: 600 MHz Clock source for ISP/VI units */ { PLL(TEGRA124_CLK_PLL_C4, "pllC4_out0", "pllC4_src"), .type = PLL_C4, .base_reg = PLLC4_BASE, .misc_reg = PLLC4_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLLSS_MISC_LOCK_ENABLE, .iddq_reg = PLLC4_BASE, .iddq_mask = 1 << PLLSS_IDDQ_BIT, .pdiv_table = 
pll12g_ssd_esd_map, .mnp_bits = {8, 8, 4, 20}, }, /* PLLP: 408 MHz Clock source for most peripherals */ { PLL(TEGRA124_CLK_PLL_P, "pllP_out0", "osc_div_clk"), .type = PLL_P, .base_reg = PLLP_BASE, .misc_reg = PLLP_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .mnp_bits = {5, 10, 3, 20}, }, /* PLLA: Audio clock sources: (11.2896 MHz, 12.288 MHz, 24.576 MHz) */ { PLL(TEGRA124_CLK_PLL_A, "pllA_out", "pllP_out1"), .type = PLL_A, .base_reg = PLLA_BASE, .misc_reg = PLLA_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .mnp_bits = {5, 10, 3, 20}, }, /* PLLU: 480 MHz Clock source for USB PHY, provides 12/60/480 MHz */ { PLL(TEGRA124_CLK_PLL_U, "pllU_out", "osc_div_clk"), .type = PLL_U, .base_reg = PLLU_BASE, .misc_reg = PLLU_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLLDU_MISC_LOCK_ENABLE, .pdiv_table = pllu_map, .mnp_bits = {5, 10, 1, 20}, }, /* PLLD: 600 MHz Clock sources for the DSI and display subsystem */ { PLL(TEGRA124_CLK_PLL_D, "pllD_out", "osc_div_clk"), .type = PLL_D, .base_reg = PLLD_BASE, .misc_reg = PLLD_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLL_MISC_LOCK_ENABLE, .mnp_bits = {5, 11, 3, 20}, }, /* PLLD2: 600 MHz Clock sources for the DSI and display subsystem */ { PLL(TEGRA124_CLK_PLL_D2, "pllD2_out", "pllD2_src"), .type = PLL_D2, .base_reg = PLLD2_BASE, .misc_reg = PLLD2_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLLSS_MISC_LOCK_ENABLE, .iddq_reg = PLLD2_BASE, .iddq_mask = 1 << PLLSS_IDDQ_BIT, .pdiv_table = pll12g_ssd_esd_map, .mnp_bits = {8, 8, 4, 20}, }, /* refPLLe: */ { PLL(0, "pllREFE_out", "osc_div_clk"), .type = PLL_REFE, .base_reg = PLLRE_BASE, .misc_reg = PLLRE_MISC, .lock_mask = PLLRE_MISC_LOCK, .lock_enable = PLLRE_MISC_LOCK_ENABLE, .iddq_reg = PLLRE_MISC, .iddq_mask = 1 << PLLRE_IDDQ_BIT, .pdiv_table = pllrefe_map, .mnp_bits = {8, 8, 4, 16}, }, /* PLLE: generate the 100 MHz reference clock for USB 3.0 (spread spectrum) */ { PLL(TEGRA124_CLK_PLL_E, "pllE_out0", "pllE_src"), .type 
= PLL_E, .base_reg = PLLE_BASE, .misc_reg = PLLE_MISC, .lock_mask = PLLE_MISC_LOCK, .lock_enable = PLLE_MISC_LOCK_ENABLE, .mnp_bits = {8, 8, 4, 24}, }, /* PLLDP: 600 MHz Clock source for eDP/LVDS (spread spectrum) */ { PLL(0, "pllDP_out0", "pllDP_src"), .type = PLL_DP, .base_reg = PLLDP_BASE, .misc_reg = PLLDP_MISC, .lock_mask = PLL_BASE_LOCK, .lock_enable = PLLSS_MISC_LOCK_ENABLE, .iddq_reg = PLLDP_BASE, .iddq_mask = 1 << PLLSS_IDDQ_BIT, .pdiv_table = pll12g_ssd_esd_map, .mnp_bits = {8, 8, 4, 20}, }, }; static int tegra124_pll_init(struct clknode *clk, device_t dev); static int tegra124_pll_set_gate(struct clknode *clk, bool enable); static int tegra124_pll_get_gate(struct clknode *clk, bool *enabled); static int tegra124_pll_recalc(struct clknode *clk, uint64_t *freq); static int tegra124_pll_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flags, int *stop); struct pll_sc { device_t clkdev; enum pll_type type; uint32_t base_reg; uint32_t misc_reg; uint32_t lock_mask; uint32_t lock_enable; uint32_t iddq_reg; uint32_t iddq_mask; uint32_t flags; struct pdiv_table *pdiv_table; struct mnp_bits mnp_bits; }; static clknode_method_t tegra124_pll_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, tegra124_pll_init), CLKNODEMETHOD(clknode_set_gate, tegra124_pll_set_gate), CLKNODEMETHOD(clknode_get_gate, tegra124_pll_get_gate), CLKNODEMETHOD(clknode_recalc_freq, tegra124_pll_recalc), CLKNODEMETHOD(clknode_set_freq, tegra124_pll_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra124_pll, tegra124_pll_class, tegra124_pll_methods, sizeof(struct pll_sc), clknode_class); static int pll_enable(struct pll_sc *sc) { uint32_t reg; RD4(sc, sc->base_reg, ®); if (sc->type != PLL_E) reg &= ~PLL_BASE_BYPASS; reg |= PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (0); } static int pll_disable(struct pll_sc *sc) { uint32_t reg; RD4(sc, sc->base_reg, ®); if (sc->type != PLL_E) reg |= PLL_BASE_BYPASS; reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); 
return (0); } static uint32_t pdiv_to_reg(struct pll_sc *sc, uint32_t p_div) { struct pdiv_table *tbl; tbl = sc->pdiv_table; if (tbl == NULL) return (ffs(p_div) - 1); while (tbl->divider != 0) { if (p_div <= tbl->divider) return (tbl->value); tbl++; } return (0xFFFFFFFF); } static uint32_t reg_to_pdiv(struct pll_sc *sc, uint32_t reg) { struct pdiv_table *tbl; tbl = sc->pdiv_table; if (tbl == NULL) return (1 << reg); while (tbl->divider) { if (reg == tbl->value) return (tbl->divider); tbl++; } return (0); } static uint32_t get_masked(uint32_t val, uint32_t shift, uint32_t width) { return ((val >> shift) & ((1 << width) - 1)); } static uint32_t set_masked(uint32_t val, uint32_t v, uint32_t shift, uint32_t width) { val &= ~(((1 << width) - 1) << shift); val |= (v & ((1 << width) - 1)) << shift; return (val); } static void get_divisors(struct pll_sc *sc, uint32_t *m, uint32_t *n, uint32_t *p) { uint32_t val; struct mnp_bits *mnp_bits; mnp_bits = &sc->mnp_bits; RD4(sc, sc->base_reg, &val); *m = get_masked(val, PLL_BASE_DIVM_SHIFT, mnp_bits->m_width); *n = get_masked(val, PLL_BASE_DIVN_SHIFT, mnp_bits->n_width); *p = get_masked(val, mnp_bits->p_shift, mnp_bits->p_width); } static uint32_t set_divisors(struct pll_sc *sc, uint32_t val, uint32_t m, uint32_t n, uint32_t p) { struct mnp_bits *mnp_bits; mnp_bits = &sc->mnp_bits; val = set_masked(val, m, PLL_BASE_DIVM_SHIFT, mnp_bits->m_width); val = set_masked(val, n, PLL_BASE_DIVN_SHIFT, mnp_bits->n_width); val = set_masked(val, p, mnp_bits->p_shift, mnp_bits->p_width); return (val); } static bool is_locked(struct pll_sc *sc) { uint32_t reg; switch (sc->type) { case PLL_REFE: RD4(sc, sc->misc_reg, ®); reg &= PLLRE_MISC_LOCK; break; case PLL_E: RD4(sc, sc->misc_reg, ®); reg &= PLLE_MISC_LOCK; break; default: RD4(sc, sc->base_reg, ®); reg &= PLL_BASE_LOCK; break; } return (reg != 0); } static int wait_for_lock(struct pll_sc *sc) { int i; for (i = PLL_LOCK_TIMEOUT / 10; i > 0; i--) { if (is_locked(sc)) break; DELAY(10); } if (i 
<= 0) { printf("PLL lock timeout\n"); return (ETIMEDOUT); } return (0); } static int plle_enable(struct pll_sc *sc) { uint32_t reg; int rv; uint32_t pll_m = 1; uint32_t pll_n = 200; uint32_t pll_p = 13; uint32_t pll_cml = 13; /* Disable lock override. */ RD4(sc, sc->base_reg, ®); reg &= ~PLLE_BASE_LOCK_OVERRIDE; WR4(sc, sc->base_reg, reg); RD4(sc, PLLE_AUX, ®); reg |= PLLE_AUX_ENABLE_SWCTL; reg &= ~PLLE_AUX_SEQ_ENABLE; WR4(sc, PLLE_AUX, reg); DELAY(10); RD4(sc, sc->misc_reg, ®); reg |= PLLE_MISC_LOCK_ENABLE; reg |= PLLE_MISC_IDDQ_SWCTL; reg &= ~PLLE_MISC_IDDQ_OVERRIDE_VALUE; reg |= PLLE_MISC_PTS; reg |= PLLE_MISC_VREG_BG_CTRL_MASK; reg |= PLLE_MISC_VREG_CTRL_MASK; WR4(sc, sc->misc_reg, reg); DELAY(10); RD4(sc, PLLE_SS_CNTL, ®); reg |= PLLE_SS_CNTL_DISABLE; WR4(sc, PLLE_SS_CNTL, reg); RD4(sc, sc->base_reg, ®); reg = set_divisors(sc, reg, pll_m, pll_n, pll_p); reg &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); reg |= pll_cml << PLLE_BASE_DIVCML_SHIFT; WR4(sc, sc->base_reg, reg); DELAY(10); pll_enable(sc); rv = wait_for_lock(sc); if (rv != 0) return (rv); RD4(sc, PLLE_SS_CNTL, ®); reg &= ~PLLE_SS_CNTL_SSCCENTER; reg &= ~PLLE_SS_CNTL_SSCINVERT; reg &= ~PLLE_SS_CNTL_COEFFICIENTS_MASK; reg |= PLLE_SS_CNTL_COEFFICIENTS_VAL; WR4(sc, PLLE_SS_CNTL, reg); reg &= ~PLLE_SS_CNTL_SSCBYP; reg &= ~PLLE_SS_CNTL_BYPASS_SS; WR4(sc, PLLE_SS_CNTL, reg); DELAY(10); reg &= ~PLLE_SS_CNTL_INTERP_RESET; WR4(sc, PLLE_SS_CNTL, reg); DELAY(10); /* HW control of brick pll. 
*/ RD4(sc, sc->misc_reg, ®); reg &= ~PLLE_MISC_IDDQ_SWCTL; WR4(sc, sc->misc_reg, reg); RD4(sc, PLLE_AUX, ®); reg |= PLLE_AUX_USE_LOCKDET; reg |= PLLE_AUX_SEQ_START_STATE; reg &= ~PLLE_AUX_ENABLE_SWCTL; reg &= ~PLLE_AUX_SS_SWCTL; WR4(sc, PLLE_AUX, reg); reg |= PLLE_AUX_SEQ_START_STATE; DELAY(10); reg |= PLLE_AUX_SEQ_ENABLE; WR4(sc, PLLE_AUX, reg); RD4(sc, XUSBIO_PLL_CFG0, ®); reg |= XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET; reg |= XUSBIO_PLL_CFG0_SEQ_START_STATE; reg &= ~XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL; reg &= ~XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL; WR4(sc, XUSBIO_PLL_CFG0, reg); DELAY(10); reg |= XUSBIO_PLL_CFG0_SEQ_ENABLE; WR4(sc, XUSBIO_PLL_CFG0, reg); /* Enable HW control and unreset SATA PLL. */ RD4(sc, SATA_PLL_CFG0, ®); reg &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL; reg &= ~SATA_PLL_CFG0_PADPLL_RESET_OVERRIDE_VALUE; reg |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET; reg &= ~SATA_PLL_CFG0_SEQ_IN_SWCTL; reg &= ~SATA_PLL_CFG0_SEQ_RESET_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_LANE_PD_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_PADPLL_PD_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_ENABLE; reg |= SATA_PLL_CFG0_SEQ_START_STATE; WR4(sc, SATA_PLL_CFG0, reg); DELAY(10); reg |= SATA_PLL_CFG0_SEQ_ENABLE; WR4(sc, SATA_PLL_CFG0, reg); /* Enable HW control of PCIe PLL. */ RD4(sc, PCIE_PLL_CFG0, ®); reg |= PCIE_PLL_CFG0_SEQ_ENABLE; WR4(sc, PCIE_PLL_CFG0, reg); return (0); } static int tegra124_pll_set_gate(struct clknode *clknode, bool enable) { int rv; struct pll_sc *sc; sc = clknode_get_softc(clknode); if (enable == 0) { rv = pll_disable(sc); return(rv); } if (sc->type == PLL_E) rv = plle_enable(sc); else rv = pll_enable(sc); return (rv); } static int tegra124_pll_get_gate(struct clknode *clknode, bool *enabled) { uint32_t reg; struct pll_sc *sc; sc = clknode_get_softc(clknode); RD4(sc, sc->base_reg, ®); *enabled = reg & PLL_BASE_ENABLE ? 
true: false; WR4(sc, sc->base_reg, reg); return (0); } static int pll_set_std(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags, uint32_t m, uint32_t n, uint32_t p) { uint32_t reg; struct mnp_bits *mnp_bits; int rv; mnp_bits = &sc->mnp_bits; if (m >= (1 << mnp_bits->m_width)) return (ERANGE); if (n >= (1 << mnp_bits->n_width)) return (ERANGE); if (pdiv_to_reg(sc, p) >= (1 << mnp_bits->p_width)) return (ERANGE); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (((fin / m) * n) /p))) return (ERANGE); *fout = ((fin / m) * n) /p; return (0); } pll_disable(sc); /* take pll out of IDDQ */ if (sc->iddq_reg != 0) MD4(sc, sc->iddq_reg, sc->iddq_mask, 0); RD4(sc, sc->base_reg, ®); reg = set_masked(reg, m, PLL_BASE_DIVM_SHIFT, mnp_bits->m_width); reg = set_masked(reg, n, PLL_BASE_DIVN_SHIFT, mnp_bits->n_width); reg = set_masked(reg, pdiv_to_reg(sc, p), mnp_bits->p_shift, mnp_bits->p_width); WR4(sc, sc->base_reg, reg); /* Enable PLL. */ RD4(sc, sc->base_reg, ®); reg |= PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); /* Enable lock detection. 
*/ RD4(sc, sc->misc_reg, ®); reg |= sc->lock_enable; WR4(sc, sc->misc_reg, reg); rv = wait_for_lock(sc); if (rv != 0) { /* Disable PLL */ RD4(sc, sc->base_reg, ®); reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (rv); } RD4(sc, sc->misc_reg, ®); pll_enable(sc); *fout = ((fin / m) * n) / p; return 0; } static int plla_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 1; m = 5; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std(sc, fin, fout, flags, m, n, p)); } static int pllc_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 2; m = 1; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std( sc, fin, fout, flags, m, n, p)); } /* * PLLD2 is used as source for pixel clock for HDMI. * We must be able to set it frequency very flexibly and * precisely (within 5% tolerance limit allowed by HDMI specs). * * For this reason, it is necessary to search the full state space. * Fortunately, thanks to early cycle terminations, performance * is within acceptable limits. 
*/ #define PLLD2_PFD_MIN 12000000 /* 12 MHz */ #define PLLD2_PFD_MAX 38000000 /* 38 MHz */ #define PLLD2_VCO_MIN 600000000 /* 600 MHz */ #define PLLD2_VCO_MAX 1200000000 /* 1.2 GHz */ static int plld2_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; uint32_t best_m, best_n, best_p; uint64_t vco, pfd; int64_t err, best_err; struct mnp_bits *mnp_bits; struct pdiv_table *tbl; int p_idx, rv; mnp_bits = &sc->mnp_bits; tbl = sc->pdiv_table; best_err = INT64_MAX; for (p_idx = 0; tbl[p_idx].divider != 0; p_idx++) { p = tbl[p_idx].divider; /* Check constraints */ vco = *fout * p; if (vco < PLLD2_VCO_MIN) continue; if (vco > PLLD2_VCO_MAX) break; for (m = 1; m < (1 << mnp_bits->m_width); m++) { n = (*fout * p * m + fin / 2) / fin; /* Check constraints */ if (n == 0) continue; if (n >= (1 << mnp_bits->n_width)) break; vco = (fin * n) / m; if (vco > PLLD2_VCO_MAX || vco < PLLD2_VCO_MIN) continue; pfd = fin / m; if (pfd > PLLD2_PFD_MAX || vco < PLLD2_PFD_MIN) continue; /* Constraints passed, save best result */ err = *fout - vco / p; if (err < 0) err = -err; if (err < best_err) { best_err = err; best_p = p; best_m = m; best_n = n; } if (err == 0) goto done; } } done: /* * HDMI specification allows 5% pixel clock tolerance, * we will by a slightly stricter */ if (best_err > ((*fout * 100) / 4)) return (ERANGE); if (flags & CLK_SET_DRYRUN) return (0); rv = pll_set_std(sc, fin, fout, flags, best_m, best_n, best_p); /* XXXX Panic for rv == ERANGE ? 
*/ return (rv); } static int pllrefe_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; m = 1; p = 1; n = *fout * p * m / fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std(sc, fin, fout, flags, m, n, p)); } static int pllx_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t reg; uint32_t m, n, p; struct mnp_bits *mnp_bits; int rv; mnp_bits = &sc->mnp_bits; p = 1; m = 1; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); if (m >= (1 << mnp_bits->m_width)) return (ERANGE); if (n >= (1 << mnp_bits->n_width)) return (ERANGE); if (pdiv_to_reg(sc, p) >= (1 << mnp_bits->p_width)) return (ERANGE); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (((fin / m) * n) /p))) return (ERANGE); *fout = ((fin / m) * n) /p; return (0); } /* PLLX doesn't have bypass, disable it first. */ RD4(sc, sc->base_reg, ®); reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); /* Set PLL. */ RD4(sc, sc->base_reg, ®); reg = set_masked(reg, m, PLL_BASE_DIVM_SHIFT, mnp_bits->m_width); reg = set_masked(reg, n, PLL_BASE_DIVN_SHIFT, mnp_bits->n_width); reg = set_masked(reg, pdiv_to_reg(sc, p), mnp_bits->p_shift, mnp_bits->p_width); WR4(sc, sc->base_reg, reg); RD4(sc, sc->base_reg, ®); DELAY(100); /* Enable lock detection. */ RD4(sc, sc->misc_reg, ®); reg |= sc->lock_enable; WR4(sc, sc->misc_reg, reg); /* Enable PLL. 
*/ RD4(sc, sc->base_reg, ®); reg |= PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); rv = wait_for_lock(sc); if (rv != 0) { /* Disable PLL */ RD4(sc, sc->base_reg, ®); reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (rv); } RD4(sc, sc->misc_reg, ®); *fout = ((fin / m) * n) / p; return (0); } static int tegra124_pll_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flags, int *stop) { *stop = 1; int rv; struct pll_sc *sc; sc = clknode_get_softc(clknode); dprintf("%s: %s requested freq: %llu, input freq: %llu\n", __func__, clknode_get_name(clknode), *fout, fin); switch (sc->type) { case PLL_A: rv = plla_set_freq(sc, fin, fout, flags); break; case PLL_C: rv = pllc_set_freq(sc, fin, fout, flags); break; case PLL_D2: rv = plld2_set_freq(sc, fin, fout, flags); break; case PLL_REFE: rv = pllrefe_set_freq(sc, fin, fout, flags); break; case PLL_X: rv = pllx_set_freq(sc, fin, fout, flags); break; case PLL_U: if (*fout == 480000000) /* PLLU is fixed to 480 MHz */ rv = 0; else rv = ERANGE; break; default: rv = ENXIO; break; } return (rv); } static int tegra124_pll_init(struct clknode *clk, device_t dev) { struct pll_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); /* If PLL is enabled, enable lock detect too. 
*/ RD4(sc, sc->base_reg, ®); if (reg & PLL_BASE_ENABLE) { RD4(sc, sc->misc_reg, ®); reg |= sc->lock_enable; WR4(sc, sc->misc_reg, reg); } if (sc->type == PLL_REFE) { RD4(sc, sc->misc_reg, ®); reg &= ~(1 << 29); /* Diasble lock override */ WR4(sc, sc->misc_reg, reg); } clknode_init_parent_idx(clk, 0); return(0); } static int tegra124_pll_recalc(struct clknode *clk, uint64_t *freq) { struct pll_sc *sc; uint32_t m, n, p, pr; uint32_t reg, misc_reg; sc = clknode_get_softc(clk); RD4(sc, sc->base_reg, ®); RD4(sc, sc->misc_reg, &misc_reg); get_divisors(sc, &m, &n, &pr); if (sc->type != PLL_E) p = reg_to_pdiv(sc, pr); else p = 2 * (pr - 1); dprintf("%s: %s (0x%08x, 0x%08x) - m: %d, n: %d, p: %d (%d): " "e: %d, r: %d, o: %d - %s\n", __func__, clknode_get_name(clk), reg, misc_reg, m, n, p, pr, (reg >> 30) & 1, (reg >> 29) & 1, (reg >> 28) & 1, is_locked(sc) ? "locked" : "unlocked"); if ((m == 0) || (n == 0) || (p == 0)) { *freq = 0; return (EINVAL); } *freq = ((*freq / m) * n) / p; return (0); } static int pll_register(struct clkdom *clkdom, struct clk_pll_def *clkdef) { struct clknode *clk; struct pll_sc *sc; clk = clknode_create(clkdom, &tegra124_pll_class, &clkdef->clkdef); if (clk == NULL) return (ENXIO); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->type = clkdef->type; sc->base_reg = clkdef->base_reg; sc->misc_reg = clkdef->misc_reg; sc->lock_mask = clkdef->lock_mask; sc->lock_enable = clkdef->lock_enable; sc->iddq_reg = clkdef->iddq_reg; sc->iddq_mask = clkdef->iddq_mask; sc->flags = clkdef->flags; sc->pdiv_table = clkdef->pdiv_table; sc->mnp_bits = clkdef->mnp_bits; clknode_register(clkdom, clk); return (0); } static void config_utmi_pll(struct tegra124_car_softc *sc) { uint32_t reg; /* * XXX Simplified UTMIP settings for 12MHz base clock. 
*/ #define ENABLE_DELAY_COUNT 0x02 #define STABLE_COUNT 0x2F #define ACTIVE_DELAY_COUNT 0x04 #define XTAL_FREQ_COUNT 0x76 CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG2, ®); reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); reg |= UTMIP_PLL_CFG2_STABLE_COUNT(STABLE_COUNT); reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(ACTIVE_DELAY_COUNT); reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG2, reg); CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG1, ®); reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(ENABLE_DELAY_COUNT); reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(XTAL_FREQ_COUNT); reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN; reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP; reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG1, reg); /* Prepare UTMIP requencer. */ CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); /* Powerup UTMIP. */ CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG1, ®); reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG1, reg); DELAY(10); /* SW override for UTMIPLL */ CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL; reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); DELAY(10); /* HW control of UTMIPLL. 
*/ CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); } void tegra124_init_plls(struct tegra124_car_softc *sc) { int i, rv; for (i = 0; i < nitems(pll_clks); i++) { rv = pll_register(sc->clkdom, pll_clks + i); if (rv != 0) panic("pll_register failed"); } config_utmi_pll(sc); } diff --git a/sys/arm/nvidia/tegra124/tegra124_clk_super.c b/sys/arm/nvidia/tegra124/tegra124_clk_super.c index 8fb5e524e5fd..28245c257aea 100644 --- a/sys/arm/nvidia/tegra124/tegra124_clk_super.c +++ b/sys/arm/nvidia/tegra124/tegra124_clk_super.c @@ -1,261 +1,261 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include -#include +#include #include #include "tegra124_car.h" /* Flags */ #define SMF_HAVE_DIVIDER_2 1 struct super_mux_def { struct clknode_init_def clkdef; uint32_t base_reg; uint32_t flags; int src_pllx; int src_div2; }; #define PLIST(x) static const char *x[] #define SM(_id, cn, pl, r, x, d, f) \ { \ .clkdef.id = _id, \ .clkdef.name = cn, \ .clkdef.parent_names = pl, \ .clkdef.parent_cnt = nitems(pl), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_reg = r, \ .src_pllx = x, \ .src_div2 = d, \ .flags = f, \ } PLIST(cclk_g_parents) = { "clk_m", "pllC_out0", "clk_s", "pllM_out0", "pllP_out0", "pllP_out4", "pllC2_out0", "pllC3_out0", "pllX_out", NULL, NULL, NULL, NULL, NULL, NULL,NULL, // "dfllCPU_out0" }; PLIST(cclk_lp_parents) = { "clk_m", "pllC_out0", "clk_s", "pllM_out0", "pllP_out0", "pllP_out4", "pllC2_out0", "pllC3_out0", "pllX_out", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "pllX_out0" }; PLIST(sclk_parents) = { "clk_m", "pllC_out1", "pllP_out4", "pllP_out0", "pllP_out2", "pllC_out0", "clk_s", "pllM_out1", }; static struct super_mux_def super_mux_def[] = { SM(TEGRA124_CLK_CCLK_G, "cclk_g", cclk_g_parents, CCLKG_BURST_POLICY, 0, 0, 0), SM(TEGRA124_CLK_CCLK_LP, "cclk_lp", cclk_lp_parents, CCLKLP_BURST_POLICY, 8, 16, SMF_HAVE_DIVIDER_2), SM(TEGRA124_CLK_SCLK, "sclk", sclk_parents, SCLK_BURST_POLICY, 0, 0, 0), }; static int super_mux_init(struct clknode *clk, device_t dev); static int super_mux_set_mux(struct clknode *clk, int idx); struct super_mux_sc { device_t clkdev; uint32_t base_reg; int src_pllx; int src_div2; uint32_t flags; int mux; }; static clknode_method_t super_mux_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, super_mux_init), CLKNODEMETHOD(clknode_set_mux, super_mux_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra124_super_mux, tegra124_super_mux_class, super_mux_methods, sizeof(struct super_mux_sc), clknode_class); /* Mux status. 
*/ #define SUPER_MUX_STATE_STDBY 0 #define SUPER_MUX_STATE_IDLE 1 #define SUPER_MUX_STATE_RUN 2 #define SUPER_MUX_STATE_IRQ 3 #define SUPER_MUX_STATE_FIQ 4 /* Mux register bits. */ #define SUPER_MUX_STATE_BIT_SHIFT 28 #define SUPER_MUX_STATE_BIT_MASK 0xF /* State is Priority encoded */ #define SUPER_MUX_STATE_BIT_STDBY 0x00 #define SUPER_MUX_STATE_BIT_IDLE 0x01 #define SUPER_MUX_STATE_BIT_RUN 0x02 #define SUPER_MUX_STATE_BIT_IRQ 0x04 #define SUPER_MUX_STATE_BIT_FIQ 0x08 #define SUPER_MUX_MUX_WIDTH 4 #define SUPER_MUX_LP_DIV2_BYPASS (1 << 16) static uint32_t super_mux_get_state(uint32_t reg) { reg = (reg >> SUPER_MUX_STATE_BIT_SHIFT) & SUPER_MUX_STATE_BIT_MASK; if (reg & SUPER_MUX_STATE_BIT_FIQ) return (SUPER_MUX_STATE_FIQ); if (reg & SUPER_MUX_STATE_BIT_IRQ) return (SUPER_MUX_STATE_IRQ); if (reg & SUPER_MUX_STATE_BIT_RUN) return (SUPER_MUX_STATE_RUN); if (reg & SUPER_MUX_STATE_BIT_IDLE) return (SUPER_MUX_STATE_IDLE); return (SUPER_MUX_STATE_STDBY); } static int super_mux_init(struct clknode *clk, device_t dev) { struct super_mux_sc *sc; uint32_t reg; int shift, state; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); state = super_mux_get_state(reg); if ((state != SUPER_MUX_STATE_RUN) && (state != SUPER_MUX_STATE_IDLE)) { panic("Unexpected super mux state: %u", state); } shift = state * SUPER_MUX_MUX_WIDTH; sc->mux = (reg >> shift) & ((1 << SUPER_MUX_MUX_WIDTH) - 1); /* * CCLKLP uses PLLX/2 as source if LP_DIV2_BYPASS isn't set * and source mux is set to PLLX. 
*/ if (sc->flags & SMF_HAVE_DIVIDER_2) { if (((reg & SUPER_MUX_LP_DIV2_BYPASS) == 0) && (sc->mux == sc->src_pllx)) sc->mux = sc->src_div2; } clknode_init_parent_idx(clk, sc->mux); return(0); } static int super_mux_set_mux(struct clknode *clk, int idx) { struct super_mux_sc *sc; int shift, state; uint32_t reg, dummy; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); state = super_mux_get_state(reg); if ((state != SUPER_MUX_STATE_RUN) && (state != SUPER_MUX_STATE_IDLE)) { panic("Unexpected super mux state: %u", state); } shift = (state - 1) * SUPER_MUX_MUX_WIDTH; sc->mux = idx; if (sc->flags & SMF_HAVE_DIVIDER_2) { if (idx == sc->src_div2) { idx = sc->src_pllx; reg &= ~SUPER_MUX_LP_DIV2_BYPASS; WR4(sc, sc->base_reg, reg); RD4(sc, sc->base_reg, &dummy); } else if (idx == sc->src_pllx) { reg = SUPER_MUX_LP_DIV2_BYPASS; WR4(sc, sc->base_reg, reg); RD4(sc, sc->base_reg, &dummy); } } reg &= ~(((1 << SUPER_MUX_MUX_WIDTH) - 1) << shift); reg |= idx << shift; WR4(sc, sc->base_reg, reg); RD4(sc, sc->base_reg, &dummy); DEVICE_UNLOCK(sc); return(0); } static int super_mux_register(struct clkdom *clkdom, struct super_mux_def *clkdef) { struct clknode *clk; struct super_mux_sc *sc; clk = clknode_create(clkdom, &tegra124_super_mux_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->base_reg = clkdef->base_reg; sc->src_pllx = clkdef->src_pllx; sc->src_div2 = clkdef->src_div2; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } void tegra124_super_mux_clock(struct tegra124_car_softc *sc) { int i, rv; for (i = 0; i < nitems(super_mux_def); i++) { rv = super_mux_register(sc->clkdom, &super_mux_def[i]); if (rv != 0) panic("super_mux_register failed"); } } diff --git a/sys/arm/nvidia/tegra124/tegra124_coretemp.c b/sys/arm/nvidia/tegra124/tegra124_coretemp.c index a22618ca5338..42ed02de4f86 100644 --- a/sys/arm/nvidia/tegra124/tegra124_coretemp.c +++ 
b/sys/arm/nvidia/tegra124/tegra124_coretemp.c @@ -1,260 +1,260 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "tegra_soctherm_if.h" enum therm_info { CORETEMP_TEMP, CORETEMP_DELTA, CORETEMP_RESOLUTION, CORETEMP_TJMAX, }; struct tegra124_coretemp_softc { device_t dev; int overheat_log; int core_max_temp; int cpu_id; device_t tsens_dev; intptr_t tsens_id; }; static int coretemp_get_val_sysctl(SYSCTL_HANDLER_ARGS) { device_t dev; int val, temp, rv; struct tegra124_coretemp_softc *sc; enum therm_info type; char stemp[16]; dev = (device_t) arg1; sc = device_get_softc(dev); type = arg2; rv = TEGRA_SOCTHERM_GET_TEMPERATURE(sc->tsens_dev, sc->dev, sc->tsens_id, &temp); if (rv != 0) { device_printf(sc->dev, "Cannot read temperature sensor %d: %d\n", sc->tsens_id, rv); return (rv); } switch (type) { case CORETEMP_TEMP: val = temp / 100; val += 2731; break; case CORETEMP_DELTA: val = (sc->core_max_temp - temp) / 1000; break; case CORETEMP_RESOLUTION: val = 1; break; case CORETEMP_TJMAX: val = sc->core_max_temp / 100; val += 2731; break; } if ((temp > sc->core_max_temp) && !sc->overheat_log) { sc->overheat_log = 1; /* * Check for Critical Temperature Status and Critical * Temperature Log. It doesn't really matter if the * current temperature is invalid because the "Critical * Temperature Log" bit will tell us if the Critical * Temperature has * been reached in past. It's not * directly related to the current temperature. * * If we reach a critical level, allow devctl(4) * to catch this and shutdown the system. 
*/ device_printf(dev, "critical temperature detected, " "suggest system shutdown\n"); snprintf(stemp, sizeof(stemp), "%d", val); devctl_notify("coretemp", "Thermal", stemp, "notify=0xcc"); } else { sc->overheat_log = 0; } return (sysctl_handle_int(oidp, 0, val, req)); } static int tegra124_coretemp_ofw_parse(struct tegra124_coretemp_softc *sc) { int rv, ncells; phandle_t node, xnode; pcell_t *cells; node = OF_peer(0); node = ofw_bus_find_child(node, "thermal-zones"); if (node <= 0) { device_printf(sc->dev, "Cannot find 'thermal-zones'.\n"); return (ENXIO); } node = ofw_bus_find_child(node, "cpu"); if (node <= 0) { device_printf(sc->dev, "Cannot find 'cpu'\n"); return (ENXIO); } rv = ofw_bus_parse_xref_list_alloc(node, "thermal-sensors", "#thermal-sensor-cells", 0, &xnode, &ncells, &cells); if (rv != 0) { device_printf(sc->dev, "Cannot parse 'thermal-sensors' property.\n"); return (ENXIO); } if (ncells != 1) { device_printf(sc->dev, "Invalid format of 'thermal-sensors' property(%d).\n", ncells); return (ENXIO); } sc->tsens_id = 0x100 + sc->cpu_id; //cells[0]; OF_prop_free(cells); sc->tsens_dev = OF_device_from_xref(xnode); if (sc->tsens_dev == NULL) { device_printf(sc->dev, "Cannot find thermal sensors device."); return (ENXIO); } return (0); } static void tegra124_coretemp_identify(driver_t *driver, device_t parent) { phandle_t root; root = OF_finddevice("/"); if (!ofw_bus_node_is_compatible(root, "nvidia,tegra124")) return; if (device_find_child(parent, "tegra124_coretemp", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "tegra124_coretemp", -1) == NULL) device_printf(parent, "add child failed\n"); } static int tegra124_coretemp_probe(device_t dev) { device_set_desc(dev, "CPU Thermal Sensor"); return (0); } static int tegra124_coretemp_attach(device_t dev) { struct tegra124_coretemp_softc *sc; device_t pdev; struct sysctl_oid *oid; struct sysctl_ctx_list *ctx; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->cpu_id = device_get_unit(dev); sc->core_max_temp 
= 102000; pdev = device_get_parent(dev); rv = tegra124_coretemp_ofw_parse(sc); if (rv != 0) return (rv); ctx = device_get_sysctl_ctx(dev); oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(pdev)), OID_AUTO, "coretemp", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-CPU thermal information"); /* * Add the MIBs to dev.cpu.N and dev.cpu.N.coretemp. */ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(pdev)), OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_TEMP, coretemp_get_val_sysctl, "IK", "Current temperature"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "delta", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_DELTA, coretemp_get_val_sysctl, "I", "Delta between TCC activation and current temperature"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "resolution", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_RESOLUTION, coretemp_get_val_sysctl, "I", "Resolution of CPU thermal sensor"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "tjmax", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_TJMAX, coretemp_get_val_sysctl, "IK", "TCC activation temperature"); return (0); } static int tegra124_coretemp_detach(device_t dev) { return (0); } static device_method_t tegra124_coretemp_methods[] = { /* Device interface */ DEVMETHOD(device_identify, tegra124_coretemp_identify), DEVMETHOD(device_probe, tegra124_coretemp_probe), DEVMETHOD(device_attach, tegra124_coretemp_attach), DEVMETHOD(device_detach, tegra124_coretemp_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(tegra124_coretemp, tegra124_coretemp_driver, tegra124_coretemp_methods, sizeof(struct tegra124_coretemp_softc)); DRIVER_MODULE(tegra124_coretemp, cpu, tegra124_coretemp_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra124/tegra124_cpufreq.c b/sys/arm/nvidia/tegra124/tegra124_cpufreq.c index f407d093455a..7cf83978557e 100644 --- a/sys/arm/nvidia/tegra124/tegra124_cpufreq.c +++ 
b/sys/arm/nvidia/tegra124/tegra124_cpufreq.c @@ -1,588 +1,588 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "cpufreq_if.h" #define XXX /* CPU voltage table entry */ struct speedo_entry { uint64_t freq; /* Frequency point */ int c0; /* Coeeficient values for */ int c1; /* quadratic equation: */ int c2; /* c2 * speedo^2 + c1 * speedo + c0 */ }; struct cpu_volt_def { int min_uvolt; /* Min allowed CPU voltage */ int max_uvolt; /* Max allowed CPU voltage */ int step_uvolt; /* Step of CPU voltage */ int speedo_scale; /* Scaling factor for cvt */ int speedo_nitems; /* Size of speedo table */ struct speedo_entry *speedo_tbl; /* CPU voltage table */ }; struct cpu_speed_point { uint64_t freq; /* Frequecy */ int uvolt; /* Requested voltage */ }; static struct speedo_entry tegra124_speedo_dpll_tbl[] = { { 204000000ULL, 1112619, -29295, 402}, { 306000000ULL, 1150460, -30585, 402}, { 408000000ULL, 1190122, -31865, 402}, { 510000000ULL, 1231606, -33155, 402}, { 612000000ULL, 1274912, -34435, 402}, { 714000000ULL, 1320040, -35725, 402}, { 816000000ULL, 1366990, -37005, 402}, { 918000000ULL, 1415762, -38295, 402}, {1020000000ULL, 1466355, -39575, 402}, {1122000000ULL, 1518771, -40865, 402}, {1224000000ULL, 1573009, -42145, 402}, {1326000000ULL, 1629068, -43435, 402}, {1428000000ULL, 1686950, -44715, 402}, {1530000000ULL, 1746653, -46005, 402}, {1632000000ULL, 1808179, -47285, 402}, {1734000000ULL, 1871526, -48575, 402}, {1836000000ULL, 1936696, -49855, 402}, {1938000000ULL, 2003687, -51145, 402}, {2014500000ULL, 2054787, -52095, 402}, {2116500000ULL, 2124957, -53385, 402}, {2218500000ULL, 2196950, -54665, 402}, {2320500000ULL, 2270765, -55955, 402}, {2320500000ULL, 2270765, -55955, 402}, {2422500000ULL, 2346401, -57235, 402}, {2524500000ULL, 2437299, -58535, 402}, }; static struct cpu_volt_def tegra124_cpu_volt_dpll_def = { .min_uvolt = 900000, /* 0.9 V */ .max_uvolt = 1260000, /* 1.26 */ .step_uvolt = 10000, /* 10 mV */ .speedo_scale = 100, 
.speedo_nitems = nitems(tegra124_speedo_dpll_tbl), .speedo_tbl = tegra124_speedo_dpll_tbl, }; static struct speedo_entry tegra124_speedo_pllx_tbl[] = { { 204000000ULL, 800000, 0, 0}, { 306000000ULL, 800000, 0, 0}, { 408000000ULL, 800000, 0, 0}, { 510000000ULL, 800000, 0, 0}, { 612000000ULL, 800000, 0, 0}, { 714000000ULL, 800000, 0, 0}, { 816000000ULL, 820000, 0, 0}, { 918000000ULL, 840000, 0, 0}, {1020000000ULL, 880000, 0, 0}, {1122000000ULL, 900000, 0, 0}, {1224000000ULL, 930000, 0, 0}, {1326000000ULL, 960000, 0, 0}, {1428000000ULL, 990000, 0, 0}, {1530000000ULL, 1020000, 0, 0}, {1632000000ULL, 1070000, 0, 0}, {1734000000ULL, 1100000, 0, 0}, {1836000000ULL, 1140000, 0, 0}, {1938000000ULL, 1180000, 0, 0}, {2014500000ULL, 1220000, 0, 0}, {2116500000ULL, 1260000, 0, 0}, {2218500000ULL, 1310000, 0, 0}, {2320500000ULL, 1360000, 0, 0}, {2397000000ULL, 1400000, 0, 0}, {2499000000ULL, 1400000, 0, 0}, }; static struct cpu_volt_def tegra124_cpu_volt_pllx_def = { .min_uvolt = 1000000, /* XXX 0.9 V doesn't work on all boards */ .max_uvolt = 1260000, /* 1.26 */ .step_uvolt = 10000, /* 10 mV */ .speedo_scale = 100, .speedo_nitems = nitems(tegra124_speedo_pllx_tbl), .speedo_tbl = tegra124_speedo_pllx_tbl, }; static uint64_t cpu_freq_tbl[] = { 204000000ULL, 306000000ULL, 408000000ULL, 510000000ULL, 612000000ULL, 714000000ULL, 816000000ULL, 918000000ULL, 1020000000ULL, 1122000000ULL, 1224000000ULL, 1326000000ULL, 1428000000ULL, 1530000000ULL, 1632000000ULL, 1734000000ULL, 1836000000ULL, 1938000000ULL, 2014000000ULL, 2116000000ULL, 2218000000ULL, 2320000000ULL, 2422000000ULL, 2524000000ULL, }; static uint64_t cpu_max_freq[] = { 2014500000ULL, 2320500000ULL, 2116500000ULL, 2524500000ULL, }; struct tegra124_cpufreq_softc { device_t dev; phandle_t node; regulator_t supply_vdd_cpu; clk_t clk_cpu_g; clk_t clk_cpu_lp; clk_t clk_pll_x; clk_t clk_pll_p; clk_t clk_dfll; int process_id; int speedo_id; int speedo_value; uint64_t cpu_max_freq; struct cpu_volt_def *cpu_def; struct 
cpu_speed_point *speed_points; int nspeed_points; struct cpu_speed_point *act_speed_point; int latency; }; static int cpufreq_lowest_freq = 1; TUNABLE_INT("hw.tegra124.cpufreq.lowest_freq", &cpufreq_lowest_freq); #define DIV_ROUND_CLOSEST(val, div) (((val) + ((div) / 2)) / (div)) #define ROUND_UP(val, div) roundup(val, div) #define ROUND_DOWN(val, div) rounddown(val, div) /* * Compute requesetd voltage for given frequency and SoC process variations, * - compute base voltage from speedo value using speedo table * - round up voltage to next regulator step * - clamp it to regulator limits */ static int freq_to_voltage(struct tegra124_cpufreq_softc *sc, uint64_t freq) { int uv, scale, min_uvolt, max_uvolt, step_uvolt; struct speedo_entry *ent; int i; /* Get speedo entry with higher frequency */ ent = NULL; for (i = 0; i < sc->cpu_def->speedo_nitems; i++) { if (sc->cpu_def->speedo_tbl[i].freq >= freq) { ent = &sc->cpu_def->speedo_tbl[i]; break; } } if (ent == NULL) ent = &sc->cpu_def->speedo_tbl[sc->cpu_def->speedo_nitems - 1]; scale = sc->cpu_def->speedo_scale; /* uV = (c2 * speedo / scale + c1) * speedo / scale + c0) */ uv = DIV_ROUND_CLOSEST(ent->c2 * sc->speedo_value, scale); uv = DIV_ROUND_CLOSEST((uv + ent->c1) * sc->speedo_value, scale) + ent->c0; step_uvolt = sc->cpu_def->step_uvolt; /* Round up it to next regulator step */ uv = ROUND_UP(uv, step_uvolt); /* Clamp result */ min_uvolt = ROUND_UP(sc->cpu_def->min_uvolt, step_uvolt); max_uvolt = ROUND_DOWN(sc->cpu_def->max_uvolt, step_uvolt); if (uv < min_uvolt) uv = min_uvolt; if (uv > max_uvolt) uv = max_uvolt; return (uv); } static void build_speed_points(struct tegra124_cpufreq_softc *sc) { int i; sc->nspeed_points = nitems(cpu_freq_tbl); sc->speed_points = malloc(sizeof(struct cpu_speed_point) * sc->nspeed_points, M_DEVBUF, M_NOWAIT); for (i = 0; i < sc->nspeed_points; i++) { sc->speed_points[i].freq = cpu_freq_tbl[i]; sc->speed_points[i].uvolt = freq_to_voltage(sc, cpu_freq_tbl[i]); } } static struct 
cpu_speed_point * get_speed_point(struct tegra124_cpufreq_softc *sc, uint64_t freq) { int i; if (sc->speed_points[0].freq >= freq) return (sc->speed_points + 0); for (i = 0; i < sc->nspeed_points - 1; i++) { if (sc->speed_points[i + 1].freq > freq) return (sc->speed_points + i); } return (sc->speed_points + sc->nspeed_points - 1); } static int tegra124_cpufreq_settings(device_t dev, struct cf_setting *sets, int *count) { struct tegra124_cpufreq_softc *sc; int i, j; if (sets == NULL || count == NULL) return (EINVAL); sc = device_get_softc(dev); memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * (*count)); for (i = 0, j = sc->nspeed_points - 1; j >= 0; j--) { if (sc->cpu_max_freq < sc->speed_points[j].freq) continue; sets[i].freq = sc->speed_points[j].freq / 1000000; sets[i].volts = sc->speed_points[j].uvolt / 1000; sets[i].lat = sc->latency; sets[i].dev = dev; i++; } *count = i; return (0); } static int set_cpu_freq(struct tegra124_cpufreq_softc *sc, uint64_t freq) { struct cpu_speed_point *point; int rv; point = get_speed_point(sc, freq); if (sc->act_speed_point->uvolt < point->uvolt) { /* set cpu voltage */ rv = regulator_set_voltage(sc->supply_vdd_cpu, point->uvolt, point->uvolt); DELAY(10000); if (rv != 0) return (rv); } /* Switch supermux to PLLP first */ rv = clk_set_parent_by_clk(sc->clk_cpu_g, sc->clk_pll_p); if (rv != 0) { device_printf(sc->dev, "Can't set parent to PLLP\n"); return (rv); } /* Set PLLX frequency */ rv = clk_set_freq(sc->clk_pll_x, point->freq, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(sc->dev, "Can't set CPU clock frequency\n"); return (rv); } rv = clk_set_parent_by_clk(sc->clk_cpu_g, sc->clk_pll_x); if (rv != 0) { device_printf(sc->dev, "Can't set parent to PLLX\n"); return (rv); } if (sc->act_speed_point->uvolt > point->uvolt) { /* set cpu voltage */ rv = regulator_set_voltage(sc->supply_vdd_cpu, point->uvolt, point->uvolt); if (rv != 0) return (rv); } sc->act_speed_point = point; return (0); } static int 
tegra124_cpufreq_set(device_t dev, const struct cf_setting *cf)
{
	struct tegra124_cpufreq_softc *sc;
	uint64_t freq;
	int rv;

	if (cf == NULL || cf->freq < 0)
		return (EINVAL);
	sc = device_get_softc(dev);
	/* Clamp the request between the lowest tunable and the SKU limit. */
	freq = cf->freq;
	if (freq < cpufreq_lowest_freq)
		freq = cpufreq_lowest_freq;
	freq *= 1000000;	/* cf->freq is in MHz; internal unit is Hz. */
	if (freq >= sc->cpu_max_freq)
		freq = sc->cpu_max_freq;
	rv = set_cpu_freq(sc, freq);
	return (rv);
}

static int
tegra124_cpufreq_get(device_t dev, struct cf_setting *cf)
{
	struct tegra124_cpufreq_softc *sc;

	if (cf == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf));
	cf->dev = NULL;
	/* Report the active operating point in MHz / mV. */
	cf->freq = sc->act_speed_point->freq / 1000000;
	cf->volts = sc->act_speed_point->uvolt / 1000;
	/* Transition latency in us. */
	cf->lat = sc->latency;
	/* Driver providing this setting. */
	cf->dev = dev;

	return (0);
}

static int
tegra124_cpufreq_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);
	*type = CPUFREQ_TYPE_ABSOLUTE;

	return (0);
}

/*
 * Fetch the regulator and clocks this driver needs from the parent CPU
 * node's FDT properties.  Returns 0 on success; on failure the already
 * acquired resources are released by detach.
 */
static int
get_fdt_resources(struct tegra124_cpufreq_softc *sc, phandle_t node)
{
	int rv;
	device_t parent_dev;

	parent_dev =  device_get_parent(sc->dev);
	rv = regulator_get_by_ofw_property(parent_dev, 0, "vdd-cpu-supply",
	    &sc->supply_vdd_cpu);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'vdd-cpu' regulator\n");
		return (rv);
	}
	rv = clk_get_by_ofw_name(parent_dev, 0, "cpu_g", &sc->clk_cpu_g);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'cpu_g' clock: %d\n", rv);
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(parent_dev, 0, "cpu_lp", &sc->clk_cpu_lp);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'cpu_lp' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(parent_dev, 0, "pll_x", &sc->clk_pll_x);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'pll_x' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(parent_dev, 0, "pll_p", &sc->clk_pll_p);
	if (rv != 0) {
		/*
		 * Attribute the error to this device; the original passed
		 * parent_dev here, unlike every sibling error path.
		 */
		device_printf(sc->dev, "Cannot get 'pll_p' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(parent_dev, 0, "dfll",
&sc->clk_dfll); if (rv != 0) { /* XXX DPLL is not implemented yet */ /* device_printf(sc->dev, "Cannot get 'dfll' clock\n"); return (ENXIO); */ } return (0); } static void tegra124_cpufreq_identify(driver_t *driver, device_t parent) { phandle_t root; root = OF_finddevice("/"); if (!ofw_bus_node_is_compatible(root, "nvidia,tegra124")) return; if (device_get_unit(parent) != 0) return; if (device_find_child(parent, "tegra124_cpufreq", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "tegra124_cpufreq", -1) == NULL) device_printf(parent, "add child failed\n"); } static int tegra124_cpufreq_probe(device_t dev) { device_set_desc(dev, "CPU Frequency Control"); return (0); } static int tegra124_cpufreq_attach(device_t dev) { struct tegra124_cpufreq_softc *sc; uint64_t freq; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(device_get_parent(dev)); sc->process_id = tegra_sku_info.cpu_process_id; sc->speedo_id = tegra_sku_info.cpu_speedo_id; sc->speedo_value = tegra_sku_info.cpu_speedo_value; /* Tegra 124 */ /* XXX DPLL is not implemented yet */ if (1) sc->cpu_def = &tegra124_cpu_volt_pllx_def; else sc->cpu_def = &tegra124_cpu_volt_dpll_def; rv = get_fdt_resources(sc, sc->node); if (rv != 0) { return (rv); } build_speed_points(sc); rv = clk_get_freq(sc->clk_cpu_g, &freq); if (rv != 0) { device_printf(dev, "Can't get CPU clock frequency\n"); return (rv); } if (sc->speedo_id < nitems(cpu_max_freq)) sc->cpu_max_freq = cpu_max_freq[sc->speedo_id]; else sc->cpu_max_freq = cpu_max_freq[0]; sc->act_speed_point = get_speed_point(sc, freq); /* Set safe startup CPU frequency. */ rv = set_cpu_freq(sc, 1632000000); if (rv != 0) { device_printf(dev, "Can't set initial CPU clock frequency\n"); return (rv); } /* This device is controlled by cpufreq(4). 
*/ cpufreq_register(dev); return (0); } static int tegra124_cpufreq_detach(device_t dev) { struct tegra124_cpufreq_softc *sc; sc = device_get_softc(dev); cpufreq_unregister(dev); if (sc->supply_vdd_cpu != NULL) regulator_release(sc->supply_vdd_cpu); if (sc->clk_cpu_g != NULL) clk_release(sc->clk_cpu_g); if (sc->clk_cpu_lp != NULL) clk_release(sc->clk_cpu_lp); if (sc->clk_pll_x != NULL) clk_release(sc->clk_pll_x); if (sc->clk_pll_p != NULL) clk_release(sc->clk_pll_p); if (sc->clk_dfll != NULL) clk_release(sc->clk_dfll); return (0); } static device_method_t tegra124_cpufreq_methods[] = { /* Device interface */ DEVMETHOD(device_identify, tegra124_cpufreq_identify), DEVMETHOD(device_probe, tegra124_cpufreq_probe), DEVMETHOD(device_attach, tegra124_cpufreq_attach), DEVMETHOD(device_detach, tegra124_cpufreq_detach), /* cpufreq interface */ DEVMETHOD(cpufreq_drv_set, tegra124_cpufreq_set), DEVMETHOD(cpufreq_drv_get, tegra124_cpufreq_get), DEVMETHOD(cpufreq_drv_settings, tegra124_cpufreq_settings), DEVMETHOD(cpufreq_drv_type, tegra124_cpufreq_type), DEVMETHOD_END }; static DEFINE_CLASS_0(tegra124_cpufreq, tegra124_cpufreq_driver, tegra124_cpufreq_methods, sizeof(struct tegra124_cpufreq_softc)); DRIVER_MODULE(tegra124_cpufreq, cpu, tegra124_cpufreq_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra124/tegra124_pmc.c b/sys/arm/nvidia/tegra124/tegra124_pmc.c index 1742d624384b..6b42659d453a 100644 --- a/sys/arm/nvidia/tegra124/tegra124_pmc.c +++ b/sys/arm/nvidia/tegra124/tegra124_pmc.c @@ -1,559 +1,559 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #define PMC_CNTRL 0x000 #define PMC_CNTRL_CPUPWRGOOD_SEL_MASK (0x3 << 20) #define PMC_CNTRL_CPUPWRGOOD_SEL_SHIFT 20 #define PMC_CNTRL_CPUPWRGOOD_EN (1 << 19) #define PMC_CNTRL_FUSE_OVERRIDE (1 << 18) #define PMC_CNTRL_INTR_POLARITY (1 << 17) #define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) #define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) #define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) #define PMC_CNTRL_AOINIT (1 << 13) #define PMC_CNTRL_PWRGATE_DIS (1 << 12) #define PMC_CNTRL_SYSCLK_OE (1 << 11) #define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) #define PMC_CNTRL_PWRREQ_OE (1 << 9) #define PMC_CNTRL_PWRREQ_POLARITY (1 << 8) #define PMC_CNTRL_BLINK_EN (1 << 7) #define PMC_CNTRL_GLITCHDET_DIS (1 << 6) #define PMC_CNTRL_LATCHWAKE_EN (1 << 5) #define PMC_CNTRL_MAIN_RST (1 << 4) #define PMC_CNTRL_KBC_RST (1 << 3) #define PMC_CNTRL_RTC_RST (1 << 2) #define PMC_CNTRL_RTC_CLK_DIS (1 << 1) #define 
PMC_CNTRL_KBC_CLK_DIS (1 << 0) #define PMC_DPD_SAMPLE 0x020 #define PMC_CLAMP_STATUS 0x02C #define PMC_CLAMP_STATUS_PARTID(x) (1 << ((x) & 0x1F)) #define PMC_PWRGATE_TOGGLE 0x030 #define PMC_PWRGATE_TOGGLE_START (1 << 8) #define PMC_PWRGATE_TOGGLE_PARTID(x) (((x) & 0x1F) << 0) #define PMC_REMOVE_CLAMPING_CMD 0x034 #define PMC_REMOVE_CLAMPING_CMD_PARTID(x) (1 << ((x) & 0x1F)) #define PMC_PWRGATE_STATUS 0x038 #define PMC_PWRGATE_STATUS_PARTID(x) (1 << ((x) & 0x1F)) #define PMC_SCRATCH0 0x050 #define PMC_SCRATCH0_MODE_RECOVERY (1 << 31) #define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30) #define PMC_SCRATCH0_MODE_RCM (1 << 1) #define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \ PMC_SCRATCH0_MODE_BOOTLOADER | \ PMC_SCRATCH0_MODE_RCM) #define PMC_CPUPWRGOOD_TIMER 0x0c8 #define PMC_CPUPWROFF_TIMER 0x0cc #define PMC_SCRATCH41 0x140 #define PMC_SENSOR_CTRL 0x1b0 #define PMC_SENSOR_CTRL_BLOCK_SCRATCH_WRITE (1 << 2) #define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1) #define PMC_SENSOR_CTRL_ENABLE_PG (1 << 0) #define PMC_IO_DPD_REQ 0x1b8 #define PMC_IO_DPD_REQ_CODE_IDLE (0 << 30) #define PMC_IO_DPD_REQ_CODE_OFF (1 << 30) #define PMC_IO_DPD_REQ_CODE_ON (2 << 30) #define PMC_IO_DPD_REQ_CODE_MASK (3 << 30) #define PMC_IO_DPD_STATUS 0x1bc #define PMC_IO_DPD_STATUS_HDMI (1 << 28) #define PMC_IO_DPD2_REQ 0x1c0 #define PMC_IO_DPD2_STATUS 0x1c4 #define PMC_IO_DPD2_STATUS_HV (1 << 6) #define PMC_SEL_DPD_TIM 0x1c8 #define PMC_SCRATCH54 0x258 #define PMC_SCRATCH54_DATA_SHIFT 8 #define PMC_SCRATCH54_ADDR_SHIFT 0 #define PMC_SCRATCH55 0x25c #define PMC_SCRATCH55_RST_ENABLE (1 << 31) #define PMC_SCRATCH55_CNTRL_TYPE (1 << 30) #define PMC_SCRATCH55_CNTRL_ID_SHIFT 27 #define PMC_SCRATCH55_CNTRL_ID_MASK 0x07 #define PMC_SCRATCH55_PINMUX_SHIFT 24 #define PMC_SCRATCH55_PINMUX_MASK 0x07 #define PMC_SCRATCH55_CHECKSUM_SHIFT 16 #define PMC_SCRATCH55_CHECKSUM_MASK 0xFF #define PMC_SCRATCH55_16BITOP (1 << 15) #define PMC_SCRATCH55_I2CSLV1_SHIFT 0 #define PMC_SCRATCH55_I2CSLV1_MASK 0x7F #define 
PMC_GPU_RG_CNTRL 0x2d4 #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define PMC_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define PMC_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define PMC_LOCK_INIT(_sc) mtx_init(&(_sc)->mtx, \ device_get_nameunit(_sc->dev), "tegra124_pmc", MTX_DEF) #define PMC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx); #define PMC_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED); #define PMC_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_NOTOWNED); struct tegra124_pmc_softc { device_t dev; struct resource *mem_res; clk_t clk; struct mtx mtx; uint32_t rate; enum tegra_suspend_mode suspend_mode; uint32_t cpu_good_time; uint32_t cpu_off_time; uint32_t core_osc_time; uint32_t core_pmu_time; uint32_t core_off_time; int corereq_high; int sysclkreq_high; int combined_req; int cpu_pwr_good_en; uint32_t lp0_vec_phys; uint32_t lp0_vec_size; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-pmc", 1}, {NULL, 0}, }; static struct tegra124_pmc_softc *pmc_sc; static inline struct tegra124_pmc_softc * tegra124_pmc_get_sc(void) { if (pmc_sc == NULL) panic("To early call to Tegra PMC driver.\n"); return (pmc_sc); } static int tegra124_pmc_set_powergate(struct tegra124_pmc_softc *sc, enum tegra_powergate_id id, int ena) { uint32_t reg; int i; PMC_LOCK(sc); reg = RD4(sc, PMC_PWRGATE_STATUS) & PMC_PWRGATE_STATUS_PARTID(id); if (((reg != 0) && ena) || ((reg == 0) && !ena)) { PMC_UNLOCK(sc); return (0); } for (i = 100; i > 0; i--) { reg = RD4(sc, PMC_PWRGATE_TOGGLE); if ((reg & PMC_PWRGATE_TOGGLE_START) == 0) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting for TOGGLE_START\n"); WR4(sc, PMC_PWRGATE_TOGGLE, PMC_PWRGATE_TOGGLE_START | PMC_PWRGATE_TOGGLE_PARTID(id)); for (i = 100; i > 0; i--) { reg = RD4(sc, PMC_PWRGATE_TOGGLE); if ((reg & PMC_PWRGATE_TOGGLE_START) == 0) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting for 
TOGGLE_START\n");
	PMC_UNLOCK(sc);
	return (0);
}

/*
 * Remove the I/O clamps from a powered partition so its outputs become
 * usable.  Panics if the partition is not currently powered.
 */
int
tegra_powergate_remove_clamping(enum tegra_powergate_id id)
{
	struct tegra124_pmc_softc *sc;
	uint32_t reg;
	enum tegra_powergate_id swid;
	int i;

	sc = tegra124_pmc_get_sc();
	/* The GPU partition has its own dedicated clamp control register. */
	if (id == TEGRA_POWERGATE_3D) {
		WR4(sc, PMC_GPU_RG_CNTRL, 0);
		return (0);
	}
	reg = RD4(sc, PMC_PWRGATE_STATUS);
	if ((reg & PMC_PWRGATE_STATUS_PARTID(id)) == 0)
		panic("Attempt to remove clamping for unpowered partition.\n");
	/*
	 * PCX and VDE IDs are swapped when issuing the remove-clamping
	 * command — presumably a hardware register-layout quirk; confirm
	 * against the TRM / Linux tegra-powergate driver.
	 */
	if (id == TEGRA_POWERGATE_PCX)
		swid = TEGRA_POWERGATE_VDE;
	else if (id == TEGRA_POWERGATE_VDE)
		swid = TEGRA_POWERGATE_PCX;
	else
		swid = id;
	WR4(sc, PMC_REMOVE_CLAMPING_CMD, PMC_REMOVE_CLAMPING_CMD_PARTID(swid));
	/* Poll (up to ~100 us) for the hardware to clear the command bit. */
	for (i = 100; i > 0; i--) {
		reg = RD4(sc, PMC_REMOVE_CLAMPING_CMD);
		if ((reg & PMC_REMOVE_CLAMPING_CMD_PARTID(swid)) == 0)
			break;
		DELAY(1);
	}
	if (i <= 0)
		device_printf(sc->dev, "Timeout when remove clamping\n");
	/* Verify the clamp really dropped; this must not fail silently. */
	reg = RD4(sc, PMC_CLAMP_STATUS);
	if ((reg & PMC_CLAMP_STATUS_PARTID(id)) != 0)
		panic("Cannot remove clamping\n");
	return (0);
}

/* Return 1 if the given partition is powered, 0 otherwise. */
int
tegra_powergate_is_powered(enum tegra_powergate_id id)
{
	struct tegra124_pmc_softc *sc;
	uint32_t reg;

	sc = tegra124_pmc_get_sc();
	reg = RD4(sc, PMC_PWRGATE_STATUS);
	return ((reg & PMC_PWRGATE_STATUS_PARTID(id)) ?
1 : 0); } int tegra_powergate_power_on(enum tegra_powergate_id id) { struct tegra124_pmc_softc *sc; int rv, i; sc = tegra124_pmc_get_sc(); rv = tegra124_pmc_set_powergate(sc, id, 1); if (rv != 0) { device_printf(sc->dev, "Cannot set powergate: %d\n", id); return (rv); } for (i = 100; i > 0; i--) { if (tegra_powergate_is_powered(id)) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting on power up\n"); return (rv); } int tegra_powergate_power_off(enum tegra_powergate_id id) { struct tegra124_pmc_softc *sc; int rv, i; sc = tegra124_pmc_get_sc(); rv = tegra124_pmc_set_powergate(sc, id, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set powergate: %d\n", id); return (rv); } for (i = 100; i > 0; i--) { if (!tegra_powergate_is_powered(id)) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting on power off\n"); return (rv); } int tegra_powergate_sequence_power_up(enum tegra_powergate_id id, clk_t clk, hwreset_t rst) { struct tegra124_pmc_softc *sc; int rv; sc = tegra124_pmc_get_sc(); rv = hwreset_assert(rst); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); return (rv); } rv = clk_stop(clk); if (rv != 0) { device_printf(sc->dev, "Cannot stop clock\n"); goto clk_fail; } rv = tegra_powergate_power_on(id); if (rv != 0) { device_printf(sc->dev, "Cannot power on powergate\n"); goto clk_fail; } rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable clock\n"); goto clk_fail; } DELAY(20); rv = tegra_powergate_remove_clamping(id); if (rv != 0) { device_printf(sc->dev, "Cannot remove clamping\n"); goto fail; } rv = hwreset_deassert(rst); if (rv != 0) { device_printf(sc->dev, "Cannot unreset reset\n"); goto fail; } return 0; fail: clk_disable(clk); clk_fail: hwreset_assert(rst); tegra_powergate_power_off(id); return (rv); } static int tegra124_pmc_parse_fdt(struct tegra124_pmc_softc *sc, phandle_t node) { int rv; uint32_t tmp; uint32_t tmparr[2]; rv = OF_getencprop(node, "nvidia,suspend-mode", &tmp, 
sizeof(tmp)); if (rv > 0) { switch (tmp) { case 0: sc->suspend_mode = TEGRA_SUSPEND_LP0; break; case 1: sc->suspend_mode = TEGRA_SUSPEND_LP1; break; case 2: sc->suspend_mode = TEGRA_SUSPEND_LP2; break; default: sc->suspend_mode = TEGRA_SUSPEND_NONE; break; } } rv = OF_getencprop(node, "nvidia,cpu-pwr-good-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->cpu_good_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,cpu-pwr-off-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->cpu_off_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,core-pwr-good-time", tmparr, sizeof(tmparr)); if (rv == sizeof(tmparr)) { sc->core_osc_time = tmparr[0]; sc->core_pmu_time = tmparr[1]; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,core-pwr-off-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->core_off_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } sc->corereq_high = OF_hasprop(node, "nvidia,core-power-req-active-high"); sc->sysclkreq_high = OF_hasprop(node, "nvidia,sys-clock-req-active-high"); sc->combined_req = OF_hasprop(node, "nvidia,combined-power-req"); sc->cpu_pwr_good_en = OF_hasprop(node, "nvidia,cpu-pwr-good-en"); rv = OF_getencprop(node, "nvidia,lp0-vec", tmparr, sizeof(tmparr)); if (rv == sizeof(tmparr)) { sc->lp0_vec_phys = tmparr[0]; sc->core_pmu_time = tmparr[1]; sc->lp0_vec_size = TEGRA_SUSPEND_NONE; if (sc->suspend_mode == TEGRA_SUSPEND_LP0) sc->suspend_mode = TEGRA_SUSPEND_LP1; } return 0; } static int tegra124_pmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Tegra PMC"); return (BUS_PROBE_DEFAULT); } static int tegra124_pmc_detach(device_t dev) { /* This device is always present. 
*/ return (EBUSY); } static int tegra124_pmc_attach(device_t dev) { struct tegra124_pmc_softc *sc; int rid, rv; uint32_t reg; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); rv = tegra124_pmc_parse_fdt(sc, node); if (rv != 0) { device_printf(sc->dev, "Cannot parse FDT data\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "pclk", &sc->clk); if (rv != 0) { device_printf(sc->dev, "Cannot get \"pclk\" clock\n"); return (ENXIO); } rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); return (ENXIO); } PMC_LOCK_INIT(sc); /* Enable CPU power request. */ reg = RD4(sc, PMC_CNTRL); reg |= PMC_CNTRL_CPU_PWRREQ_OE; WR4(sc, PMC_CNTRL, reg); /* Set sysclk output polarity */ reg = RD4(sc, PMC_CNTRL); if (sc->sysclkreq_high) reg &= ~PMC_CNTRL_SYSCLK_POLARITY; else reg |= PMC_CNTRL_SYSCLK_POLARITY; WR4(sc, PMC_CNTRL, reg); /* Enable sysclk request. */ reg = RD4(sc, PMC_CNTRL); reg |= PMC_CNTRL_SYSCLK_OE; WR4(sc, PMC_CNTRL, reg); /* * Remove HDMI from deep power down mode. 
* XXX mote this to HDMI driver */ reg = RD4(sc, PMC_IO_DPD_STATUS); reg &= ~ PMC_IO_DPD_STATUS_HDMI; WR4(sc, PMC_IO_DPD_STATUS, reg); reg = RD4(sc, PMC_IO_DPD2_STATUS); reg &= ~ PMC_IO_DPD2_STATUS_HV; WR4(sc, PMC_IO_DPD2_STATUS, reg); if (pmc_sc != NULL) panic("tegra124_pmc: double driver attach"); pmc_sc = sc; return (0); } static device_method_t tegra124_pmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra124_pmc_probe), DEVMETHOD(device_attach, tegra124_pmc_attach), DEVMETHOD(device_detach, tegra124_pmc_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(pmc, tegra124_pmc_driver, tegra124_pmc_methods, sizeof(struct tegra124_pmc_softc)); EARLY_DRIVER_MODULE(tegra124_pmc, simplebus, tegra124_pmc_driver, NULL, NULL, 70); diff --git a/sys/arm/nvidia/tegra_ahci.c b/sys/arm/nvidia/tegra_ahci.c index 4f44a09b4a76..eeb49d6ea5a8 100644 --- a/sys/arm/nvidia/tegra_ahci.c +++ b/sys/arm/nvidia/tegra_ahci.c @@ -1,781 +1,781 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * AHCI driver for Tegra SoCs. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #define SATA_CONFIGURATION 0x180 #define SATA_CONFIGURATION_CLK_OVERRIDE (1U << 31) #define SATA_CONFIGURATION_EN_FPCI (1 << 0) #define SATA_FPCI_BAR5 0x94 #define SATA_FPCI_BAR_START(x) (((x) & 0xFFFFFFF) << 4) #define SATA_FPCI_BAR_ACCESS_TYPE (1 << 0) #define SATA_INTR_MASK 0x188 #define SATA_INTR_MASK_IP_INT_MASK (1 << 16) #define SCFG_OFFSET 0x1000 #define T_SATA0_CFG_1 0x04 #define T_SATA0_CFG_1_IO_SPACE (1 << 0) #define T_SATA0_CFG_1_MEMORY_SPACE (1 << 1) #define T_SATA0_CFG_1_BUS_MASTER (1 << 2) #define T_SATA0_CFG_1_SERR (1 << 8) #define T_SATA0_CFG_9 0x24 #define T_SATA0_CFG_9_BASE_ADDRESS_SHIFT 13 #define T_SATA0_CFG_35 0x94 #define T_SATA0_CFG_35_IDP_INDEX_MASK (0x7ff << 2) #define T_SATA0_CFG_35_IDP_INDEX (0x2a << 2) #define T_SATA0_AHCI_IDP1 0x98 #define T_SATA0_AHCI_IDP1_DATA 0x400040 #define T_SATA0_CFG_PHY_1 0x12c #define T_SATA0_CFG_PHY_1_PADS_IDDQ_EN (1 << 23) #define T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN (1 << 22) #define T_SATA0_NVOOB 0x114 #define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK (0x3 << 26) #define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH (0x3 << 26) #define T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK (0x3 << 24) #define T_SATA0_NVOOB_SQUELCH_FILTER_MODE (0x1 << 24) #define 
T_SATA0_NVOOB_COMMA_CNT_MASK (0xff << 16) #define T_SATA0_NVOOB_COMMA_CNT (0x07 << 16) #define T_SATA0_CFG_PHY 0x120 #define T_SATA0_CFG_PHY_MASK_SQUELCH (1 << 24) #define T_SATA0_CFG_PHY_USE_7BIT_ALIGN_DET_FOR_SPD (1 << 11) #define T_SATA0_CFG2NVOOB_2 0x134 #define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK (0x1ff << 18) #define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW (0xc << 18) #define T_SATA0_AHCI_HBA_CAP_BKDR 0x300 #define T_SATA0_AHCI_HBA_CAP_BKDR_SNCQ (1 << 30) #define T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM (1 << 17) #define T_SATA0_AHCI_HBA_CAP_BKDR_SALP (1 << 26) #define T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP (1 << 14) #define T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP (1 << 13) #define T_SATA0_BKDOOR_CC 0x4a4 #define T_SATA0_BKDOOR_CC_CLASS_CODE_MASK (0xffff << 16) #define T_SATA0_BKDOOR_CC_CLASS_CODE (0x0106 << 16) #define T_SATA0_BKDOOR_CC_PROG_IF_MASK (0xff << 8) #define T_SATA0_BKDOOR_CC_PROG_IF (0x01 << 8) #define T_SATA0_CFG_SATA 0x54c #define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN (1 << 12) #define T_SATA0_CFG_MISC 0x550 #define T_SATA0_INDEX 0x680 #define T_SATA0_CHX_PHY_CTRL1_GEN1 0x690 #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT 8 #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT 0 #define T_SATA0_CHX_PHY_CTRL1_GEN2 0x694 #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT 12 #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT 0 #define T_SATA0_CHX_PHY_CTRL2 0x69c #define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1 0x23 #define T_SATA0_CHX_PHY_CTRL11 0x6d0 #define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ (0x2800 << 16) #define T_SATA0_CHX_PHY_CTRL17 0x6e8 #define T_SATA0_CHX_PHY_CTRL18 0x6ec #define T_SATA0_CHX_PHY_CTRL20 0x6f4 #define T_SATA0_CHX_PHY_CTRL21 0x6f8 #define FUSE_SATA_CALIB 0x124 #define FUSE_SATA_CALIB_MASK 0x3 #define SATA_AUX_MISC_CNTL 0x1108 
#define SATA_AUX_PAD_PLL_CTRL_0 0x1120 #define SATA_AUX_PAD_PLL_CTRL_1 0x1124 #define SATA_AUX_PAD_PLL_CTRL_2 0x1128 #define SATA_AUX_PAD_PLL_CTRL_3 0x112c #define T_AHCI_HBA_CCC_PORTS 0x0018 #define T_AHCI_HBA_CAP_BKDR 0x00A0 #define T_AHCI_HBA_CAP_BKDR_S64A (1 << 31) #define T_AHCI_HBA_CAP_BKDR_SNCQ (1 << 30) #define T_AHCI_HBA_CAP_BKDR_SSNTF (1 << 29) #define T_AHCI_HBA_CAP_BKDR_SMPS (1 << 28) #define T_AHCI_HBA_CAP_BKDR_SUPP_STG_SPUP (1 << 27) #define T_AHCI_HBA_CAP_BKDR_SALP (1 << 26) #define T_AHCI_HBA_CAP_BKDR_SAL (1 << 25) #define T_AHCI_HBA_CAP_BKDR_SUPP_CLO (1 << 24) #define T_AHCI_HBA_CAP_BKDR_INTF_SPD_SUPP(x) (((x) & 0xF) << 20) #define T_AHCI_HBA_CAP_BKDR_SUPP_NONZERO_OFFSET (1 << 19) #define T_AHCI_HBA_CAP_BKDR_SUPP_AHCI_ONLY (1 << 18) #define T_AHCI_HBA_CAP_BKDR_SUPP_PM (1 << 17) #define T_AHCI_HBA_CAP_BKDR_FIS_SWITCHING (1 << 16) #define T_AHCI_HBA_CAP_BKDR_PIO_MULT_DRQ_BLK (1 << 15) #define T_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP (1 << 14) #define T_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP (1 << 13) #define T_AHCI_HBA_CAP_BKDR_NUM_CMD_SLOTS(x) (((x) & 0x1F) << 8) #define T_AHCI_HBA_CAP_BKDR_CMD_CMPL_COALESING (1 << 7) #define T_AHCI_HBA_CAP_BKDR_ENCL_MGMT_SUPP (1 << 6) #define T_AHCI_HBA_CAP_BKDR_EXT_SATA (1 << 5) #define T_AHCI_HBA_CAP_BKDR_NUM_PORTS(x) (((x) & 0xF) << 0) #define T_AHCI_PORT_BKDR 0x0170 #define T_AHCI_PORT_BKDR_PXDEVSLP_DETO_OVERRIDE_VAL(x) (((x) & 0xFF) << 24) #define T_AHCI_PORT_BKDR_PXDEVSLP_MDAT_OVERRIDE_VAL(x) (((x) & 0x1F) << 16) #define T_AHCI_PORT_BKDR_PXDEVSLP_DETO_OVERRIDE (1 << 15) #define T_AHCI_PORT_BKDR_PXDEVSLP_MDAT_OVERRIDE (1 << 14) #define T_AHCI_PORT_BKDR_PXDEVSLP_DM(x) (((x) & 0xF) << 10) #define T_AHCI_PORT_BKDR_PORT_UNCONNECTED (1 << 9) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_CLAMP_THIS_CH (1 << 8) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_TXRXCLK_UNCLAMP (1 << 7) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_TXRXCLK_CLAMP (1 << 6) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_DEVCLK_UNCLAMP (1 << 5) #define 
T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_DEVCLK_CLAMP (1 << 4) #define T_AHCI_PORT_BKDR_HOTPLUG_CAP (1 << 3) #define T_AHCI_PORT_BKDR_MECH_SWITCH (1 << 2) #define T_AHCI_PORT_BKDR_COLD_PRSN_DET (1 << 1) #define T_AHCI_PORT_BKDR_EXT_SATA_SUPP (1 << 0) /* AUX registers */ #define SATA_AUX_MISC_CNTL_1 0x008 #define SATA_AUX_MISC_CNTL_1_DEVSLP_OVERRIDE (1 << 17) #define SATA_AUX_MISC_CNTL_1_SDS_SUPPORT (1 << 13) #define SATA_AUX_MISC_CNTL_1_DESO_SUPPORT (1 << 15) #define AHCI_WR4(_sc, _r, _v) bus_write_4((_sc)->ctlr.r_mem, (_r), (_v)) #define AHCI_RD4(_sc, _r) bus_read_4((_sc)->ctlr.r_mem, (_r)) #define SATA_WR4(_sc, _r, _v) bus_write_4((_sc)->sata_mem, (_r), (_v)) #define SATA_RD4(_sc, _r) bus_read_4((_sc)->sata_mem, (_r)) struct sata_pad_calibration { uint32_t gen1_tx_amp; uint32_t gen1_tx_peak; uint32_t gen2_tx_amp; uint32_t gen2_tx_peak; }; static const struct sata_pad_calibration tegra124_pad_calibration[] = { {0x18, 0x04, 0x18, 0x0a}, {0x0e, 0x04, 0x14, 0x0a}, {0x0e, 0x07, 0x1a, 0x0e}, {0x14, 0x0e, 0x1a, 0x0e}, }; struct ahci_soc; struct tegra_ahci_sc { struct ahci_controller ctlr; /* Must be first */ device_t dev; struct ahci_soc *soc; struct resource *sata_mem; struct resource *aux_mem; clk_t clk_sata; clk_t clk_sata_oob; clk_t clk_pll_e; clk_t clk_cml; hwreset_t hwreset_sata; hwreset_t hwreset_sata_oob; hwreset_t hwreset_sata_cold; regulator_t regulators[16]; /* Safe maximum */ phy_t phy; }; struct ahci_soc { char **regulator_names; int (*init)(struct tegra_ahci_sc *sc); }; /* Tegra 124 config. */ static char *tegra124_reg_names[] = { "hvdd-supply", "vddio-supply", "avdd-supply", "target-5v-supply", "target-12v-supply", NULL }; static int tegra124_ahci_init(struct tegra_ahci_sc *sc); static struct ahci_soc tegra124_soc = { .regulator_names = tegra124_reg_names, .init = tegra124_ahci_init, }; /* Tegra 210 config. 
*/
static char *tegra210_reg_names[] = {
	NULL
};

static struct ahci_soc tegra210_soc = {
	.regulator_names = tegra210_reg_names,
};

/* Map DT "compatible" strings to the per-SoC configuration. */
static struct ofw_compat_data compat_data[] = {
	{"nvidia,tegra124-ahci", (uintptr_t)&tegra124_soc},
	{"nvidia,tegra210-ahci", (uintptr_t)&tegra210_soc},
	{NULL, 0}
};

/*
 * Look up all FDT-described resources (regulators, resets, phy and clocks)
 * and store the handles in the softc.  Returns 0 on success or an errno on
 * failure; resources already obtained are not released here.
 */
static int
get_fdt_resources(struct tegra_ahci_sc *sc, phandle_t node)
{
	int i, rv;

	/* Regulators (list is per-SoC; bounded by the softc array). */
	for (i = 0; sc->soc->regulator_names[i] != NULL; i++) {
		if (i >= nitems(sc->regulators)) {
			device_printf(sc->dev,
			    "Too many regulators present in DT.\n");
			return (EOVERFLOW);
		}
		rv = regulator_get_by_ofw_property(sc->dev, 0,
		    sc->soc->regulator_names[i], sc->regulators + i);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot get '%s' regulator\n",
			    sc->soc->regulator_names[i]);
			return (ENXIO);
		}
	}

	/* Resets. */
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata", &sc->hwreset_sata);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'sata' reset\n");
		return (ENXIO);
	}
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata-oob",
	    &sc->hwreset_sata_oob);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'sata oob' reset\n");
		return (ENXIO);
	}
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata-cold",
	    &sc->hwreset_sata_cold);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'sata cold' reset\n");
		return (ENXIO);
	}

	/* Phy: prefer the "sata-0" name, fall back to index 0. */
	rv = phy_get_by_ofw_name(sc->dev, 0, "sata-0", &sc->phy);
	if (rv != 0) {
		rv = phy_get_by_ofw_idx(sc->dev, 0, 0, &sc->phy);
		if (rv != 0) {
			device_printf(sc->dev, "Cannot get 'sata' phy\n");
			return (ENXIO);
		}
	}

	/* Clocks. */
	rv = clk_get_by_ofw_name(sc->dev, 0, "sata", &sc->clk_sata);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'sata' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(sc->dev, 0, "sata-oob", &sc->clk_sata_oob);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'sata oob' clock\n");
		return (ENXIO);
	}
	/* These are optional; NULL marks "not present". */
	rv = clk_get_by_ofw_name(sc->dev, 0, "cml1", &sc->clk_cml);
	if (rv != 0)
		sc->clk_cml = NULL;
	rv = clk_get_by_ofw_name(sc->dev, 0, "pll_e", &sc->clk_pll_e);
	if (rv != 0)
		sc->clk_pll_e = NULL;
	return (0);
}

/*
 * Power up the SATA block: enable supplies, cycle the power gate with the
 * block held in reset, then enable the clocks, deassert the resets and
 * enable the phy.  The ordering below is required by the hardware bring-up
 * sequence — do not reorder.
 */
static int
enable_fdt_resources(struct tegra_ahci_sc *sc)
{
	int i, rv;

	/* Enable regulators. */
	for (i = 0; i < nitems(sc->regulators); i++) {
		if (sc->regulators[i] == NULL)
			continue;
		rv = regulator_enable(sc->regulators[i]);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot enable '%s' regulator\n",
			    sc->soc->regulator_names[i]);
			return (rv);
		}
	}

	/* Stop clocks before power-gating the partition. */
	clk_stop(sc->clk_sata);
	clk_stop(sc->clk_sata_oob);
	tegra_powergate_power_off(TEGRA_POWERGATE_SAX);

	/* Hold every SATA reset asserted across the power-up. */
	rv = hwreset_assert(sc->hwreset_sata);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot assert 'sata' reset\n");
		return (rv);
	}
	rv = hwreset_assert(sc->hwreset_sata_oob);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot assert 'sata oob' reset\n");
		return (rv);
	}
	rv = hwreset_assert(sc->hwreset_sata_cold);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot assert 'sata cold' reset\n");
		return (rv);
	}
	rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SAX,
	    sc->clk_sata, sc->hwreset_sata);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot enable 'SAX' powergate\n");
		return (rv);
	}
	rv = clk_enable(sc->clk_sata_oob);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot enable 'sata oob' clock\n");
		return (rv);
	}
	/* Optional clocks — only enable what the DT provided. */
	if (sc->clk_cml != NULL) {
		rv = clk_enable(sc->clk_cml);
		if (rv != 0) {
			device_printf(sc->dev, "Cannot enable 'cml' clock\n");
			return (rv);
		}
	}
	if (sc->clk_pll_e != NULL) {
		rv = clk_enable(sc->clk_pll_e);
		if (rv != 0) {
			device_printf(sc->dev, "Cannot enable 'pll e' clock\n");
			return (rv);
		}
	}
	rv
= hwreset_deassert(sc->hwreset_sata_cold); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'sata cold' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'sata oob' reset\n"); return (rv); } rv = phy_enable(sc->phy); if (rv != 0) { device_printf(sc->dev, "Cannot enable SATA phy\n"); return (rv); } return (0); } static int tegra124_ahci_init(struct tegra_ahci_sc *sc) { uint32_t val; const struct sata_pad_calibration *calib; /* Pad calibration. */ val = tegra_fuse_read_4(FUSE_SATA_CALIB); calib = tegra124_pad_calibration + (val & FUSE_SATA_CALIB_MASK); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_INDEX, 1); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT); val |= calib->gen1_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT; val |= calib->gen1_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1, val); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT); val |= calib->gen2_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT; val |= calib->gen2_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2, val); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11, T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2, T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_INDEX, 0); return (0); } static int tegra_ahci_ctrl_init(struct tegra_ahci_sc *sc) { uint32_t val; int rv; /* Enable SATA MMIO. 
*/ val = SATA_RD4(sc, SATA_FPCI_BAR5); val &= ~SATA_FPCI_BAR_START(~0); val |= SATA_FPCI_BAR_START(0x10000); val |= SATA_FPCI_BAR_ACCESS_TYPE; SATA_WR4(sc, SATA_FPCI_BAR5, val); /* Enable FPCI access */ val = SATA_RD4(sc, SATA_CONFIGURATION); val |= SATA_CONFIGURATION_EN_FPCI; SATA_WR4(sc, SATA_CONFIGURATION, val); /* Recommended electrical settings for phy */ SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL17, 0x55010000); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL18, 0x55010000); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL20, 0x1); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL21, 0x1); /* SQUELCH and Gen3 */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY); val |= T_SATA0_CFG_PHY_MASK_SQUELCH; val &= ~T_SATA0_CFG_PHY_USE_7BIT_ALIGN_DET_FOR_SPD; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY, val); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_NVOOB); val &= ~T_SATA0_NVOOB_COMMA_CNT_MASK; val &= ~T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK; val &= ~T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK; val |= T_SATA0_NVOOB_COMMA_CNT; val |= T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH; val |= T_SATA0_NVOOB_SQUELCH_FILTER_MODE; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_NVOOB, val); /* Setup COMWAKE_IDLE_CNT */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG2NVOOB_2); val &= ~T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK; val |= T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG2NVOOB_2, val); if (sc->soc->init != NULL) { rv = sc->soc->init(sc); if (rv != 0) { device_printf(sc->dev, "SOC specific intialization failed: %d\n", rv); return (rv); } } /* Enable backdoor programming. 
*/ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA); val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA, val); /* Set device class and interface */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_BKDOOR_CC); val &= ~T_SATA0_BKDOOR_CC_CLASS_CODE_MASK; val &= ~T_SATA0_BKDOOR_CC_PROG_IF_MASK; val |= T_SATA0_BKDOOR_CC_CLASS_CODE; val |= T_SATA0_BKDOOR_CC_PROG_IF; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_BKDOOR_CC, val); /* Enable LPM capabilities */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR); val |= T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SALP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR, val); /* Disable backdoor programming. */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA); val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA, val); /* SATA Second Level Clock Gating */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_35); val &= ~T_SATA0_CFG_35_IDP_INDEX_MASK; val |= T_SATA0_CFG_35_IDP_INDEX; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_35, val); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_AHCI_IDP1, 0x400040); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY_1); val |= T_SATA0_CFG_PHY_1_PADS_IDDQ_EN; val |= T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY_1, val); /* * Indicate Sata only has the capability to enter DevSleep * from slumber link. 
*/ if (sc->aux_mem != NULL) { val = bus_read_4(sc->aux_mem, SATA_AUX_MISC_CNTL_1); val |= SATA_AUX_MISC_CNTL_1_DESO_SUPPORT; bus_write_4(sc->aux_mem, SATA_AUX_MISC_CNTL_1, val); } /* Enable IPFS Clock Gating */ val = SATA_RD4(sc, SCFG_OFFSET + SATA_CONFIGURATION); val &= ~SATA_CONFIGURATION_CLK_OVERRIDE; SATA_WR4(sc, SCFG_OFFSET + SATA_CONFIGURATION, val); /* Enable IO & memory access, bus master mode */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_1); val |= T_SATA0_CFG_1_IO_SPACE; val |= T_SATA0_CFG_1_MEMORY_SPACE; val |= T_SATA0_CFG_1_BUS_MASTER; val |= T_SATA0_CFG_1_SERR; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_1, val); /* AHCI bar */ SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_9, 0x08000 << T_SATA0_CFG_9_BASE_ADDRESS_SHIFT); /* Unmask interrupts. */ val = SATA_RD4(sc, SATA_INTR_MASK); val |= SATA_INTR_MASK_IP_INT_MASK; SATA_WR4(sc, SATA_INTR_MASK, val); return (0); } static int tegra_ahci_ctlr_reset(device_t dev) { struct tegra_ahci_sc *sc; int rv; uint32_t reg; sc = device_get_softc(dev); rv = ahci_ctlr_reset(dev); if (rv != 0) return (0); AHCI_WR4(sc, T_AHCI_HBA_CCC_PORTS, 1); /* Overwrite AHCI capabilites. */ reg = AHCI_RD4(sc, T_AHCI_HBA_CAP_BKDR); reg &= ~T_AHCI_HBA_CAP_BKDR_NUM_PORTS(~0); reg |= T_AHCI_HBA_CAP_BKDR_NUM_PORTS(0); reg |= T_AHCI_HBA_CAP_BKDR_EXT_SATA; reg |= T_AHCI_HBA_CAP_BKDR_CMD_CMPL_COALESING; reg |= T_AHCI_HBA_CAP_BKDR_FIS_SWITCHING; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_PM; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_CLO; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_STG_SPUP; AHCI_WR4(sc, T_AHCI_HBA_CAP_BKDR, reg); /* Overwrite AHCI portcapabilites. 
*/ reg = AHCI_RD4(sc, T_AHCI_PORT_BKDR); reg |= T_AHCI_PORT_BKDR_COLD_PRSN_DET; reg |= T_AHCI_PORT_BKDR_HOTPLUG_CAP; reg |= T_AHCI_PORT_BKDR_EXT_SATA_SUPP; AHCI_WR4(sc, T_AHCI_PORT_BKDR, reg); return (0); } static int tegra_ahci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int tegra_ahci_attach(device_t dev) { struct tegra_ahci_sc *sc; struct ahci_controller *ctlr; phandle_t node; int rv, rid; sc = device_get_softc(dev); sc->dev = dev; ctlr = &sc->ctlr; node = ofw_bus_get_node(dev); sc->soc = (struct ahci_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; ctlr->r_rid = 0; ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE); if (ctlr->r_mem == NULL) return (ENXIO); rid = 1; sc->sata_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sata_mem == NULL) { rv = ENXIO; goto fail; } /* Aux is optionall */ rid = 2; sc->aux_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(sc->dev, "Failed to allocate FDT resource(s)\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(sc->dev, "Failed to enable FDT resource(s)\n"); goto fail; } rv = tegra_ahci_ctrl_init(sc); if (rv != 0) { device_printf(sc->dev, "Failed to initialize controller)\n"); goto fail; } /* Setup controller defaults. */ ctlr->msi = 0; ctlr->numirqs = 1; ctlr->ccc = 0; /* Reset controller. 
*/ rv = tegra_ahci_ctlr_reset(dev); if (rv != 0) goto fail; rv = ahci_attach(dev); return (rv); fail: /* XXX FDT stuff */ if (sc->sata_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 1, sc->sata_mem); if (ctlr->r_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (rv); } static int tegra_ahci_detach(device_t dev) { ahci_detach(dev); return (0); } static int tegra_ahci_suspend(device_t dev) { struct tegra_ahci_sc *sc = device_get_softc(dev); bus_generic_suspend(dev); /* Disable interupts, so the state change(s) doesn't trigger. */ ATA_OUTL(sc->ctlr.r_mem, AHCI_GHC, ATA_INL(sc->ctlr.r_mem, AHCI_GHC) & (~AHCI_GHC_IE)); return (0); } static int tegra_ahci_resume(device_t dev) { int res; if ((res = tegra_ahci_ctlr_reset(dev)) != 0) return (res); ahci_ctlr_setup(dev); return (bus_generic_resume(dev)); } static device_method_t tegra_ahci_methods[] = { DEVMETHOD(device_probe, tegra_ahci_probe), DEVMETHOD(device_attach, tegra_ahci_attach), DEVMETHOD(device_detach, tegra_ahci_detach), DEVMETHOD(device_suspend, tegra_ahci_suspend), DEVMETHOD(device_resume, tegra_ahci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr, ahci_teardown_intr), DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static DEFINE_CLASS_0(ahci, tegra_ahci_driver, tegra_ahci_methods, sizeof(struct tegra_ahci_sc)); DRIVER_MODULE(tegra_ahci, simplebus, tegra_ahci_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra_efuse.c b/sys/arm/nvidia/tegra_efuse.c index 1adfb14213e8..9e151f6ed564 100644 --- a/sys/arm/nvidia/tegra_efuse.c +++ b/sys/arm/nvidia/tegra_efuse.c @@ -1,527 +1,527 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #define FUSES_START 0x100 #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (FUSES_START + (_r))) struct efuse_soc; struct tegra_efuse_softc { device_t dev; struct resource *mem_res; struct efuse_soc *soc; clk_t clk; hwreset_t reset; }; struct tegra_efuse_softc *dev_sc; struct tegra_sku_info tegra_sku_info; static char *tegra_rev_name[] = { [TEGRA_REVISION_UNKNOWN] = "unknown", [TEGRA_REVISION_A01] = "A01", [TEGRA_REVISION_A02] = "A02", [TEGRA_REVISION_A03] = "A03", [TEGRA_REVISION_A03p] = "A03 prime", [TEGRA_REVISION_A04] = "A04", }; struct efuse_soc { void (*init)(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); }; static void tegra124_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); struct efuse_soc tegra124_efuse_soc = { .init = tegra124_init, }; static void tegra210_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); struct efuse_soc tegra210_efuse_soc = { .init = tegra210_init, }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-efuse", (intptr_t)&tegra124_efuse_soc}, {"nvidia,tegra210-efuse", (intptr_t)&tegra210_efuse_soc}, {NULL, 0} }; /* ---------------------- Tegra 124 specific code & data --------------- */ #define TEGRA124_CPU_PROCESS_CORNERS 2 #define TEGRA124_GPU_PROCESS_CORNERS 2 #define TEGRA124_SOC_PROCESS_CORNERS 2 #define TEGRA124_FUSE_SKU_INFO 0x10 #define TEGRA124_FUSE_CPU_SPEEDO_0 0x14 #define TEGRA124_FUSE_CPU_IDDQ 0x18 #define TEGRA124_FUSE_FT_REV 0x28 #define TEGRA124_FUSE_CPU_SPEEDO_1 0x2c #define TEGRA124_FUSE_CPU_SPEEDO_2 0x30 #define TEGRA124_FUSE_SOC_SPEEDO_0 0x34 #define TEGRA124_FUSE_SOC_SPEEDO_1 0x38 #define TEGRA124_FUSE_SOC_SPEEDO_2 0x3c #define TEGRA124_FUSE_SOC_IDDQ 0x40 #define TEGRA124_FUSE_GPU_IDDQ 0x128 enum { TEGRA124_THRESHOLD_INDEX_0, TEGRA124_THRESHOLD_INDEX_1, TEGRA124_THRESHOLD_INDEX_COUNT, }; 
static uint32_t tegra124_cpu_process_speedos[][TEGRA124_CPU_PROCESS_CORNERS] = { {2190, UINT_MAX}, {0, UINT_MAX}, }; static uint32_t tegra124_gpu_process_speedos[][TEGRA124_GPU_PROCESS_CORNERS] = { {1965, UINT_MAX}, {0, UINT_MAX}, }; static uint32_t tegra124_soc_process_speedos[][TEGRA124_SOC_PROCESS_CORNERS] = { {2101, UINT_MAX}, {0, UINT_MAX}, }; static void tegra124_rev_sku_to_speedo_ids(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku, int *threshold) { /* Set default */ sku->cpu_speedo_id = 0; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 0; *threshold = TEGRA124_THRESHOLD_INDEX_0; switch (sku->sku_id) { case 0x00: /* Eng sku */ case 0x0F: case 0x23: /* Using the default */ break; case 0x83: sku->cpu_speedo_id = 2; break; case 0x1F: case 0x87: case 0x27: sku->cpu_speedo_id = 2; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 1; *threshold = TEGRA124_THRESHOLD_INDEX_0; break; case 0x81: case 0x21: case 0x07: sku->cpu_speedo_id = 1; sku->soc_speedo_id = 1; sku->gpu_speedo_id = 1; *threshold = TEGRA124_THRESHOLD_INDEX_1; break; case 0x49: case 0x4A: case 0x48: sku->cpu_speedo_id = 4; sku->soc_speedo_id = 2; sku->gpu_speedo_id = 3; *threshold = TEGRA124_THRESHOLD_INDEX_1; break; default: device_printf(sc->dev, " Unknown SKU ID %d\n", sku->sku_id); break; } } static void tegra124_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku) { int i, threshold; sku->sku_id = RD4(sc, TEGRA124_FUSE_SKU_INFO); sku->soc_iddq_value = RD4(sc, TEGRA124_FUSE_SOC_IDDQ); sku->cpu_iddq_value = RD4(sc, TEGRA124_FUSE_CPU_IDDQ); sku->gpu_iddq_value = RD4(sc, TEGRA124_FUSE_GPU_IDDQ); sku->soc_speedo_value = RD4(sc, TEGRA124_FUSE_SOC_SPEEDO_0); sku->cpu_speedo_value = RD4(sc, TEGRA124_FUSE_CPU_SPEEDO_0); sku->gpu_speedo_value = RD4(sc, TEGRA124_FUSE_CPU_SPEEDO_2); if (sku->cpu_speedo_value == 0) { device_printf(sc->dev, "CPU Speedo value is not fused.\n"); return; } tegra124_rev_sku_to_speedo_ids(sc, sku, &threshold); for (i = 0; i < TEGRA124_SOC_PROCESS_CORNERS; i++) { if 
(sku->soc_speedo_value < tegra124_soc_process_speedos[threshold][i]) break; } sku->soc_process_id = i; for (i = 0; i < TEGRA124_CPU_PROCESS_CORNERS; i++) { if (sku->cpu_speedo_value < tegra124_cpu_process_speedos[threshold][i]) break; } sku->cpu_process_id = i; for (i = 0; i < TEGRA124_GPU_PROCESS_CORNERS; i++) { if (sku->gpu_speedo_value < tegra124_gpu_process_speedos[threshold][i]) break; } sku->gpu_process_id = i; } /* ----------------- End of Tegra 124 specific code & data --------------- */ /* -------------------- Tegra 201 specific code & data ------------------- */ #define TEGRA210_CPU_PROCESS_CORNERS 2 #define TEGRA210_GPU_PROCESS_CORNERS 2 #define TEGRA210_SOC_PROCESS_CORNERS 3 #define TEGRA210_FUSE_SKU_INFO 0x010 #define TEGRA210_FUSE_CPU_SPEEDO_0 0x014 #define TEGRA210_FUSE_CPU_IDDQ 0x018 #define TEGRA210_FUSE_FT_REV 0x028 #define TEGRA210_FUSE_CPU_SPEEDO_1 0x02c #define TEGRA210_FUSE_CPU_SPEEDO_2 0x030 #define TEGRA210_FUSE_SOC_SPEEDO_0 0x034 #define TEGRA210_FUSE_SOC_SPEEDO_1 0x038 #define TEGRA210_FUSE_SOC_SPEEDO_2 0x03c #define TEGRA210_FUSE_SOC_IDDQ 0x040 #define TEGRA210_FUSE_GPU_IDDQ 0x128 #define TEGRA210_FUSE_SPARE 0x270 enum { TEGRA210_THRESHOLD_INDEX_0, TEGRA210_THRESHOLD_INDEX_1, TEGRA210_THRESHOLD_INDEX_COUNT, }; static uint32_t tegra210_cpu_process_speedos[][TEGRA210_CPU_PROCESS_CORNERS] = { {2119, UINT_MAX}, {2119, UINT_MAX}, }; static uint32_t tegra210_gpu_process_speedos[][TEGRA210_GPU_PROCESS_CORNERS] = { {UINT_MAX, UINT_MAX}, {UINT_MAX, UINT_MAX}, }; static uint32_t tegra210_soc_process_speedos[][TEGRA210_SOC_PROCESS_CORNERS] = { {1950, 2100, UINT_MAX}, {1950, 2100, UINT_MAX}, }; static uint32_t tegra210_get_speedo_revision(struct tegra_efuse_softc *sc) { uint32_t reg; uint32_t val; val = 0; /* Revision i encoded in spare fields */ reg = RD4(sc, TEGRA210_FUSE_SPARE + 2 * 4); val |= (reg & 1) << 0; reg = RD4(sc, TEGRA210_FUSE_SPARE + 3 * 4); val |= (reg & 1) << 1; reg = RD4(sc, TEGRA210_FUSE_SPARE + 4 * 4); val |= (reg & 1) << 2; return 
(val); } static void tegra210_rev_sku_to_speedo_ids(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku, int speedo_rev, int *threshold) { /* Set defaults */ sku->cpu_speedo_id = 0; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 0; *threshold = TEGRA210_THRESHOLD_INDEX_0; switch (sku->sku_id) { case 0x00: /* Eng sku */ case 0x01: /* Eng sku */ case 0x07: case 0x17: case 0x27: /* Use defaults */ if (speedo_rev >= 2) sku->gpu_speedo_id = 1; break; case 0x13: if (speedo_rev >= 2) sku->gpu_speedo_id = 1; sku->cpu_speedo_id = 1; break; default: device_printf(sc->dev, " Unknown SKU ID %d\n", sku->sku_id); break; } } static void tegra210_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku) { int i, threshold, speedo_rev; uint32_t cpu_speedo[3], soc_speedo[3]; cpu_speedo[0] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_0); cpu_speedo[1] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_1); cpu_speedo[2] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_2); soc_speedo[0] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_0); soc_speedo[1] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_1); soc_speedo[2] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_2); sku->cpu_iddq_value = RD4(sc, TEGRA210_FUSE_CPU_IDDQ); sku->soc_iddq_value = RD4(sc, TEGRA210_FUSE_SOC_IDDQ); sku->gpu_iddq_value = RD4(sc, TEGRA210_FUSE_GPU_IDDQ); speedo_rev = tegra210_get_speedo_revision(sc); device_printf(sc->dev, " Speedo revision: %u\n", speedo_rev); if (speedo_rev >= 3) { sku->cpu_speedo_value = cpu_speedo[0]; sku->gpu_speedo_value = cpu_speedo[2]; sku->soc_speedo_value = soc_speedo[0]; } else if (speedo_rev == 2) { sku->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10; sku->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10; sku->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10; } else { sku->cpu_speedo_value = 2100; sku->gpu_speedo_value = cpu_speedo[2] - 75; sku->soc_speedo_value = 1900; } tegra210_rev_sku_to_speedo_ids(sc, sku, speedo_rev, &threshold); for (i = 0; i < TEGRA210_SOC_PROCESS_CORNERS; i++) { if 
(sku->soc_speedo_value < tegra210_soc_process_speedos[threshold][i]) break; } sku->soc_process_id = i; for (i = 0; i < TEGRA210_CPU_PROCESS_CORNERS; i++) { if (sku->cpu_speedo_value < tegra210_cpu_process_speedos[threshold][i]) break; } sku->cpu_process_id = i; for (i = 0; i < TEGRA210_GPU_PROCESS_CORNERS; i++) { if (sku->gpu_speedo_value < tegra210_gpu_process_speedos[threshold][i]) break; } sku->gpu_process_id = i; } /* ----------------- End of Tegra 210 specific code & data --------------- */ uint32_t tegra_fuse_read_4(int addr) { if (dev_sc == NULL) panic("tegra_fuse_read_4 called too early"); return (RD4(dev_sc, addr)); } static void tegra_efuse_dump_sku(void) { printf(" TEGRA SKU Info:\n"); printf(" chip_id: %u\n", tegra_sku_info.chip_id); printf(" sku_id: %u\n", tegra_sku_info.sku_id); printf(" cpu_process_id: %u\n", tegra_sku_info.cpu_process_id); printf(" cpu_speedo_id: %u\n", tegra_sku_info.cpu_speedo_id); printf(" cpu_speedo_value: %u\n", tegra_sku_info.cpu_speedo_value); printf(" cpu_iddq_value: %u\n", tegra_sku_info.cpu_iddq_value); printf(" soc_process_id: %u\n", tegra_sku_info.soc_process_id); printf(" soc_speedo_id: %u\n", tegra_sku_info.soc_speedo_id); printf(" soc_speedo_value: %u\n", tegra_sku_info.soc_speedo_value); printf(" soc_iddq_value: %u\n", tegra_sku_info.soc_iddq_value); printf(" gpu_process_id: %u\n", tegra_sku_info.gpu_process_id); printf(" gpu_speedo_id: %u\n", tegra_sku_info.gpu_speedo_id); printf(" gpu_speedo_value: %u\n", tegra_sku_info.gpu_speedo_value); printf(" gpu_iddq_value: %u\n", tegra_sku_info.gpu_iddq_value); printf(" revision: %s\n", tegra_rev_name[tegra_sku_info.revision]); } static int tegra_efuse_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_efuse_attach(device_t dev) { int rv, rid; struct tegra_efuse_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->soc = (struct 
efuse_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* OFW resources. */ rv = clk_get_by_ofw_name(dev, 0, "fuse", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get fuse clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "fuse", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get fuse reset\n"); goto fail; } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); goto fail; } sc->soc->init(sc, &tegra_sku_info); dev_sc = sc; if (bootverbose) tegra_efuse_dump_sku(); return (bus_generic_attach(dev)); fail: dev_sc = NULL; if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (rv); } static int tegra_efuse_detach(device_t dev) { struct tegra_efuse_softc *sc; sc = device_get_softc(dev); dev_sc = NULL; if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (bus_generic_detach(dev)); } static device_method_t tegra_efuse_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_efuse_probe), DEVMETHOD(device_attach, tegra_efuse_attach), DEVMETHOD(device_detach, tegra_efuse_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(efuse, tegra_efuse_driver, tegra_efuse_methods, sizeof(struct tegra_efuse_softc)); EARLY_DRIVER_MODULE(tegra_efuse, simplebus, tegra_efuse_driver, NULL, NULL, BUS_PASS_TIMER); diff --git a/sys/arm/nvidia/tegra_ehci.c b/sys/arm/nvidia/tegra_ehci.c index 
9160ca4fa1f3..1d34ed45dd90 100644 --- a/sys/arm/nvidia/tegra_ehci.c +++ b/sys/arm/nvidia/tegra_ehci.c @@ -1,315 +1,315 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * EHCI driver for Tegra SoCs. */ #include "opt_bus.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define TEGRA_EHCI_REG_OFF 0x100 #define TEGRA_EHCI_REG_SIZE 0x100 /* Compatible devices. 
*/ #define TEGRA124_EHCI 1 #define TEGRA210_EHCI 2 static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-ehci", (uintptr_t)TEGRA124_EHCI}, {"nvidia,tegra210-ehci", (uintptr_t)TEGRA210_EHCI}, {NULL, 0}, }; struct tegra_ehci_softc { ehci_softc_t ehci_softc; device_t dev; struct resource *ehci_mem_res; /* EHCI core regs. */ struct resource *ehci_irq_res; /* EHCI core IRQ. */ int usb_alloc_called; clk_t clk; phy_t phy; hwreset_t reset; }; static void tegra_ehci_post_reset(struct ehci_softc *ehci_softc) { uint32_t usbmode; /* Force HOST mode. */ usbmode = EOREAD4(ehci_softc, EHCI_USBMODE_LPM); usbmode &= ~EHCI_UM_CM; usbmode |= EHCI_UM_CM_HOST; device_printf(ehci_softc->sc_bus.bdev, "set host controller mode\n"); EOWRITE4(ehci_softc, EHCI_USBMODE_LPM, usbmode); } static int tegra_ehci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "Nvidia Tegra EHCI controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int tegra_ehci_detach(device_t dev) { struct tegra_ehci_softc *sc; ehci_softc_t *esc; sc = device_get_softc(dev); esc = &sc->ehci_softc; if (sc->clk != NULL) clk_release(sc->clk); if (esc->sc_bus.bdev != NULL) device_delete_child(dev, esc->sc_bus.bdev); if (esc->sc_flags & EHCI_SCFLG_DONEINIT) ehci_detach(esc); if (esc->sc_intr_hdl != NULL) bus_teardown_intr(dev, esc->sc_irq_res, esc->sc_intr_hdl); if (sc->ehci_irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ehci_irq_res); if (sc->ehci_mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->ehci_mem_res); if (sc->usb_alloc_called) usb_bus_mem_free_all(&esc->sc_bus, &ehci_iterate_hw_softc); /* During module unload there are lots of children leftover. 
*/ device_delete_children(dev); return (0); } static int tegra_ehci_attach(device_t dev) { struct tegra_ehci_softc *sc; ehci_softc_t *esc; int rv, rid; uint64_t freq; sc = device_get_softc(dev); sc->dev = dev; esc = &sc->ehci_softc; /* Allocate resources. */ rid = 0; sc->ehci_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->ehci_mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); rv = ENXIO; goto out; } rid = 0; sc->ehci_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->ehci_irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); rv = ENXIO; goto out; } rv = hwreset_get_by_ofw_name(dev, 0, "usb", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get reset\n"); rv = ENXIO; goto out; } rv = phy_get_by_ofw_property(sc->dev, 0, "nvidia,phy", &sc->phy); if (rv != 0) { device_printf(sc->dev, "Cannot get 'nvidia,phy' phy\n"); rv = ENXIO; goto out; } rv = clk_get_by_ofw_index(sc->dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get clock\n"); goto out; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock\n"); goto out; } freq = 0; rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto out; } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot clear reset: %d\n", rv); rv = ENXIO; goto out; } rv = phy_enable(sc->phy); if (rv != 0) { device_printf(dev, "Cannot enable phy: %d\n", rv); goto out; } /* Fill data for EHCI driver. */ esc->sc_vendor_get_port_speed = ehci_get_port_speed_hostc; esc->sc_vendor_post_reset = tegra_ehci_post_reset; esc->sc_io_tag = rman_get_bustag(sc->ehci_mem_res); esc->sc_bus.parent = dev; esc->sc_bus.devices = esc->sc_devices; esc->sc_bus.devices_max = EHCI_MAX_DEVICES; esc->sc_bus.dma_bits = 32; /* Allocate all DMA memory. 
*/ rv = usb_bus_mem_alloc_all(&esc->sc_bus, USB_GET_DMA_TAG(dev), &ehci_iterate_hw_softc); sc->usb_alloc_called = 1; if (rv != 0) { device_printf(dev, "usb_bus_mem_alloc_all() failed\n"); rv = ENOMEM; goto out; } /* * Set handle to USB related registers subregion used by * generic EHCI driver. */ rv = bus_space_subregion(esc->sc_io_tag, rman_get_bushandle(sc->ehci_mem_res), TEGRA_EHCI_REG_OFF, TEGRA_EHCI_REG_SIZE, &esc->sc_io_hdl); if (rv != 0) { device_printf(dev, "Could not create USB memory subregion\n"); rv = ENXIO; goto out; } /* Setup interrupt handler. */ rv = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl); if (rv != 0) { device_printf(dev, "Could not setup IRQ\n"); goto out; } /* Add USB bus device. */ esc->sc_bus.bdev = device_add_child(dev, "usbus", -1); if (esc->sc_bus.bdev == NULL) { device_printf(dev, "Could not add USB device\n"); goto out; } device_set_ivars(esc->sc_bus.bdev, &esc->sc_bus); esc->sc_id_vendor = USB_VENDOR_FREESCALE; strlcpy(esc->sc_vendor, "Nvidia", sizeof(esc->sc_vendor)); /* Set flags that affect ehci_init() behavior. */ esc->sc_flags |= EHCI_SCFLG_TT; esc->sc_flags |= EHCI_SCFLG_NORESTERM; rv = ehci_init(esc); if (rv != 0) { device_printf(dev, "USB init failed: %d\n", rv); goto out; } esc->sc_flags |= EHCI_SCFLG_DONEINIT; /* Probe the bus. 
*/ rv = device_probe_and_attach(esc->sc_bus.bdev); if (rv != 0) { device_printf(dev, "device_probe_and_attach() failed\n"); goto out; } return (0); out: tegra_ehci_detach(dev); return (rv); } static device_method_t ehci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_ehci_probe), DEVMETHOD(device_attach, tegra_ehci_attach), DEVMETHOD(device_detach, tegra_ehci_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD_END }; static DEFINE_CLASS_0(ehci, ehci_driver, ehci_methods, sizeof(struct tegra_ehci_softc)); DRIVER_MODULE(tegra_ehci, simplebus, ehci_driver, NULL, NULL); MODULE_DEPEND(tegra_ehci, usb, 1, 1, 1); diff --git a/sys/arm/nvidia/tegra_i2c.c b/sys/arm/nvidia/tegra_i2c.c index 6b68b859a037..1849ae33e3a2 100644 --- a/sys/arm/nvidia/tegra_i2c.c +++ b/sys/arm/nvidia/tegra_i2c.c @@ -1,797 +1,797 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * I2C driver for Tegra SoCs. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "iicbus_if.h" #define I2C_CNFG 0x000 #define I2C_CNFG_MSTR_CLR_BUS_ON_TIMEOUT (1 << 15) #define I2C_CNFG_DEBOUNCE_CNT(x) (((x) & 0x07) << 12) #define I2C_CNFG_NEW_MASTER_FSM (1 << 11) #define I2C_CNFG_PACKET_MODE_EN (1 << 10) #define I2C_CNFG_SEND (1 << 9) #define I2C_CNFG_NOACK (1 << 8) #define I2C_CNFG_CMD2 (1 << 7) #define I2C_CNFG_CMD1 (1 << 6) #define I2C_CNFG_START (1 << 5) #define I2C_CNFG_SLV2 (1 << 4) #define I2C_CNFG_LENGTH_SHIFT 1 #define I2C_CNFG_LENGTH_MASK 0x7 #define I2C_CNFG_A_MOD (1 << 0) #define I2C_CMD_ADDR0 0x004 #define I2C_CMD_ADDR1 0x008 #define I2C_CMD_DATA1 0x00c #define I2C_CMD_DATA2 0x010 #define I2C_STATUS 0x01c #define I2C_SL_CNFG 0x020 #define I2C_SL_RCVD 0x024 #define I2C_SL_STATUS 0x028 #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 #define I2C_TLOW_SEXT 0x034 #define I2C_SL_DELAY_COUNT 0x03c #define I2C_SL_INT_MASK 0x040 #define I2C_SL_INT_SOURCE 0x044 #define I2C_SL_INT_SET 0x048 #define I2C_TX_PACKET_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 #define I2C_FIFO_CONTROL 0x05c #define I2C_FIFO_CONTROL_SLV_TX_FIFO_TRIG(x) (((x) & 0x07) << 13) #define I2C_FIFO_CONTROL_SLV_RX_FIFO_TRIG(x) (((x) & 0x07) << 10) #define I2C_FIFO_CONTROL_SLV_TX_FIFO_FLUSH (1 << 9) 
#define I2C_FIFO_CONTROL_SLV_RX_FIFO_FLUSH (1 << 8) #define I2C_FIFO_CONTROL_TX_FIFO_TRIG(x) (((x) & 0x07) << 5) #define I2C_FIFO_CONTROL_RX_FIFO_TRIG(x) (((x) & 0x07) << 2) #define I2C_FIFO_CONTROL_TX_FIFO_FLUSH (1 << 1) #define I2C_FIFO_CONTROL_RX_FIFO_FLUSH (1 << 0) #define I2C_FIFO_STATUS 0x060 #define I2C_FIFO_STATUS_SLV_XFER_ERR_REASON (1 << 25) #define I2C_FIFO_STATUS_TX_FIFO_SLV_EMPTY_CNT(x) (((x) >> 20) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_SLV_FULL_CNT(x) (((x) >> 16) & 0xF) #define I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(x) (((x) >> 4) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(x) (((x) >> 0) & 0xF) #define I2C_INTERRUPT_MASK_REGISTER 0x064 #define I2C_INTERRUPT_STATUS_REGISTER 0x068 #define I2C_INT_SLV_ACK_WITHHELD (1 << 28) #define I2C_INT_SLV_RD2WR (1 << 27) #define I2C_INT_SLV_WR2RD (1 << 26) #define I2C_INT_SLV_PKT_XFER_ERR (1 << 25) #define I2C_INT_SLV_TX_BUFFER_REQ (1 << 24) #define I2C_INT_SLV_RX_BUFFER_FILLED (1 << 23) #define I2C_INT_SLV_PACKET_XFER_COMPLETE (1 << 22) #define I2C_INT_SLV_TFIFO_OVF (1 << 21) #define I2C_INT_SLV_RFIFO_UNF (1 << 20) #define I2C_INT_SLV_TFIFO_DATA_REQ (1 << 17) #define I2C_INT_SLV_RFIFO_DATA_REQ (1 << 16) #define I2C_INT_BUS_CLEAR_DONE (1 << 11) #define I2C_INT_TLOW_MEXT_TIMEOUT (1 << 10) #define I2C_INT_TLOW_SEXT_TIMEOUT (1 << 9) #define I2C_INT_TIMEOUT (1 << 8) #define I2C_INT_PACKET_XFER_COMPLETE (1 << 7) #define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1 << 6) #define I2C_INT_TFIFO_OVR (1 << 5) #define I2C_INT_RFIFO_UNF (1 << 4) #define I2C_INT_NOACK (1 << 3) #define I2C_INT_ARB_LOST (1 << 2) #define I2C_INT_TFIFO_DATA_REQ (1 << 1) #define I2C_INT_RFIFO_DATA_REQ (1 << 0) #define I2C_ERROR_MASK (I2C_INT_ARB_LOST | I2C_INT_NOACK | \ I2C_INT_RFIFO_UNF | I2C_INT_TFIFO_OVR) #define I2C_CLK_DIVISOR 0x06c #define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16 #define I2C_CLK_DIVISOR_STD_FAST_MODE_MASK 0xffff #define I2C_CLK_DIVISOR_HSMODE_SHIFT 0 #define I2C_CLK_DIVISOR_HSMODE_MASK 0xffff #define I2C_INTERRUPT_SOURCE_REGISTER 0x070 
#define I2C_INTERRUPT_SET_REGISTER 0x074 #define I2C_SLV_TX_PACKET_FIFO 0x07c #define I2C_SLV_PACKET_STATUS 0x080 #define I2C_BUS_CLEAR_CONFIG 0x084 #define I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(x) (((x) & 0xFF) << 16) #define I2C_BUS_CLEAR_CONFIG_BC_STOP_COND (1 << 2) #define I2C_BUS_CLEAR_CONFIG_BC_TERMINATE (1 << 1) #define I2C_BUS_CLEAR_CONFIG_BC_ENABLE (1 << 0) #define I2C_BUS_CLEAR_STATUS 0x088 #define I2C_BUS_CLEAR_STATUS_BC_STATUS (1 << 0) #define I2C_CONFIG_LOAD 0x08c #define I2C_CONFIG_LOAD_TIMEOUT_CONFIG_LOAD (1 << 2) #define I2C_CONFIG_LOAD_SLV_CONFIG_LOAD (1 << 1) #define I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD (1 << 0) #define I2C_INTERFACE_TIMING_0 0x094 #define I2C_INTERFACE_TIMING_1 0x098 #define I2C_HS_INTERFACE_TIMING_0 0x09c #define I2C_HS_INTERFACE_TIMING_1 0x0a0 /* Protocol header 0 */ #define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 #define PACKET_HEADER0_HEADER_SIZE_MASK 0x3 #define PACKET_HEADER0_PACKET_ID_SHIFT 16 #define PACKET_HEADER0_PACKET_ID_MASK 0xff #define PACKET_HEADER0_CONT_ID_SHIFT 12 #define PACKET_HEADER0_CONT_ID_MASK 0xf #define PACKET_HEADER0_PROTOCOL_I2C (1 << 4) #define PACKET_HEADER0_TYPE_SHIFT 0 #define PACKET_HEADER0_TYPE_MASK 0x7 /* I2C header */ #define I2C_HEADER_HIGHSPEED_MODE (1 << 22) #define I2C_HEADER_CONT_ON_NAK (1 << 21) #define I2C_HEADER_SEND_START_BYTE (1 << 20) #define I2C_HEADER_READ (1 << 19) #define I2C_HEADER_10BIT_ADDR (1 << 18) #define I2C_HEADER_IE_ENABLE (1 << 17) #define I2C_HEADER_REPEAT_START (1 << 16) #define I2C_HEADER_CONTINUE_XFER (1 << 15) #define I2C_HEADER_MASTER_ADDR_SHIFT 12 #define I2C_HEADER_MASTER_ADDR_MASK 0x7 #define I2C_HEADER_SLAVE_ADDR_SHIFT 0 #define I2C_HEADER_SLAVE_ADDR_MASK 0x3ff #define I2C_CLK_DIVISOR_STD_FAST_MODE 0x19 #define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8 #define I2C_REQUEST_TIMEOUT (5 * hz) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) 
mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_i2c", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-i2c", 1}, {"nvidia,tegra210-i2c", 1}, {NULL, 0} }; enum tegra_i2c_xfer_type { XFER_STOP, /* Send stop condition after xfer */ XFER_REPEAT_START, /* Send repeated start after xfer */ XFER_CONTINUE /* Don't send nothing */ } ; struct tegra_i2c_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; device_t iicbus; clk_t clk; hwreset_t reset; uint32_t core_freq; uint32_t bus_freq; int bus_inuse; struct iic_msg *msg; int msg_idx; uint32_t bus_err; int done; }; static int tegra_i2c_flush_fifo(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg; reg = RD4(sc, I2C_FIFO_CONTROL); reg |= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; WR4(sc, I2C_FIFO_CONTROL, reg); timeout = 10; while (timeout > 0) { reg = RD4(sc, I2C_FIFO_CONTROL); reg &= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; if (reg == 0) break; DELAY(10); } if (timeout <= 0) { device_printf(sc->dev, "FIFO flush timedout\n"); return (ETIMEDOUT); } return (0); } static void tegra_i2c_setup_clk(struct tegra_i2c_softc *sc, int clk_freq) { int div; div = ((sc->core_freq / clk_freq) / 10) - 1; if ((sc->core_freq / (10 * (div + 1))) > clk_freq) div++; if (div > 65535) div = 65535; WR4(sc, I2C_CLK_DIVISOR, (1 << I2C_CLK_DIVISOR_HSMODE_SHIFT) | (div << I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT)); } static void tegra_i2c_bus_clear(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg, status; WR4(sc, I2C_BUS_CLEAR_CONFIG, I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(18) | I2C_BUS_CLEAR_CONFIG_BC_STOP_COND | 
I2C_BUS_CLEAR_CONFIG_BC_TERMINATE); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); reg = RD4(sc, I2C_BUS_CLEAR_CONFIG); reg |= I2C_BUS_CLEAR_CONFIG_BC_ENABLE; WR4(sc, I2C_BUS_CLEAR_CONFIG,reg); for (timeout = 1000; timeout > 0; timeout--) { if ((RD4(sc, I2C_BUS_CLEAR_CONFIG) & I2C_BUS_CLEAR_CONFIG_BC_ENABLE) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "bus clear timeouted\n"); status = RD4(sc, I2C_BUS_CLEAR_STATUS); if ((status & I2C_BUS_CLEAR_STATUS_BC_STATUS) == 0) device_printf(sc->dev, "bus clear failed\n"); } static int tegra_i2c_hw_init(struct tegra_i2c_softc *sc) { int rv, timeout; /* Reset the core. */ rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); return (rv); } DELAY(10); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); return (rv); } WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); WR4(sc, I2C_CNFG, I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | I2C_CNFG_DEBOUNCE_CNT(2)); tegra_i2c_setup_clk(sc, sc->bus_freq); WR4(sc, I2C_FIFO_CONTROL, I2C_FIFO_CONTROL_TX_FIFO_TRIG(7) | I2C_FIFO_CONTROL_RX_FIFO_TRIG(0)); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); tegra_i2c_bus_clear(sc); return (0); } static int tegra_i2c_tx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_tx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = 0; for (i = 0; i < 
cnt; i++) { reg |= sc->msg->buf[sc->msg_idx] << (i * 8); sc->msg_idx++; } WR4(sc, I2C_TX_PACKET_FIFO, reg); } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static int tegra_i2c_rx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_rx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = RD4(sc, I2C_RX_FIFO); for (i = 0; i < cnt; i++) { sc->msg->buf[sc->msg_idx] = (reg >> (i * 8)) & 0xFF; sc->msg_idx++; } } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static void tegra_i2c_intr(void *arg) { struct tegra_i2c_softc *sc; uint32_t status, reg; int rv; sc = (struct tegra_i2c_softc *)arg; LOCK(sc); status = RD4(sc, I2C_INTERRUPT_SOURCE_REGISTER); if (sc->msg == NULL) { /* Unexpected interrupt - disable FIFOs, clear reset. 
*/ reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); UNLOCK(sc); return; } if ((status & I2C_ERROR_MASK) != 0) { if (status & I2C_INT_NOACK) sc->bus_err = IIC_ENOACK; if (status & I2C_INT_ARB_LOST) sc->bus_err = IIC_EBUSERR; if ((status & I2C_INT_TFIFO_OVR) || (status & I2C_INT_RFIFO_UNF)) sc->bus_err = IIC_EBUSERR; sc->done = 1; } else if ((status & I2C_INT_RFIFO_DATA_REQ) && (sc->msg != NULL) && (sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_rx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_TFIFO_DATA_REQ) && (sc->msg != NULL) && !(sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_tx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_RFIFO_DATA_REQ) || (status & I2C_INT_TFIFO_DATA_REQ)) { device_printf(sc->dev, "Unexpected data interrupt: 0x%08X\n", status); reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } if (status & I2C_INT_PACKET_XFER_COMPLETE) sc->done = 1; WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); if (sc->done) { WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); wakeup(&(sc->done)); } UNLOCK(sc); } static void tegra_i2c_start_msg(struct tegra_i2c_softc *sc, struct iic_msg *msg, enum tegra_i2c_xfer_type xtype) { uint32_t tmp, mask; /* Packet header. */ tmp = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | PACKET_HEADER0_PROTOCOL_I2C | (1 << PACKET_HEADER0_CONT_ID_SHIFT) | (1 << PACKET_HEADER0_PACKET_ID_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Packet size. */ WR4(sc, I2C_TX_PACKET_FIFO, msg->len - 1); /* I2C header. 
*/ tmp = I2C_HEADER_IE_ENABLE; if (xtype == XFER_CONTINUE) tmp |= I2C_HEADER_CONTINUE_XFER; else if (xtype == XFER_REPEAT_START) tmp |= I2C_HEADER_REPEAT_START; tmp |= msg->slave << I2C_HEADER_SLAVE_ADDR_SHIFT; if (msg->flags & IIC_M_RD) { tmp |= I2C_HEADER_READ; tmp |= 1 << I2C_HEADER_SLAVE_ADDR_SHIFT; } else tmp &= ~(1 << I2C_HEADER_SLAVE_ADDR_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Interrupt mask. */ mask = I2C_INT_NOACK | I2C_INT_ARB_LOST | I2C_INT_PACKET_XFER_COMPLETE; if (msg->flags & IIC_M_RD) mask |= I2C_INT_RFIFO_DATA_REQ; else mask |= I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, mask); } static int tegra_i2c_poll(struct tegra_i2c_softc *sc) { int timeout; for(timeout = 10000; timeout > 0; timeout--) { UNLOCK(sc); tegra_i2c_intr(sc); LOCK(sc); if (sc->done != 0) break; DELAY(1); } if (timeout <= 0) return (ETIMEDOUT); return (0); } static int tegra_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { int rv, i; struct tegra_i2c_softc *sc; enum tegra_i2c_xfer_type xtype; sc = device_get_softc(dev); LOCK(sc); /* Get the bus. */ while (sc->bus_inuse == 1) SLEEP(sc, 0); sc->bus_inuse = 1; rv = 0; for (i = 0; i < nmsgs; i++) { sc->msg = &msgs[i]; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Check for valid parameters. */ if (sc->msg == NULL || sc->msg->buf == NULL || sc->msg->len == 0) { rv = EINVAL; break; } /* Get flags for next transfer. 
*/ if (i == (nmsgs - 1)) { if (msgs[i].flags & IIC_M_NOSTOP) xtype = XFER_CONTINUE; else xtype = XFER_STOP; } else { if (msgs[i + 1].flags & IIC_M_NOSTART) xtype = XFER_CONTINUE; else xtype = XFER_REPEAT_START; } tegra_i2c_start_msg(sc, sc->msg, xtype); if (cold) rv = tegra_i2c_poll(sc); else rv = msleep(&sc->done, &sc->mtx, PZERO, "iic", I2C_REQUEST_TIMEOUT); WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); if (rv == 0) rv = sc->bus_err; if (rv != 0) break; } if (rv != 0) { tegra_i2c_hw_init(sc); tegra_i2c_flush_fifo(sc); } sc->msg = NULL; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Wake up the processes that are waiting for the bus. */ sc->bus_inuse = 0; wakeup(sc); UNLOCK(sc); return (rv); } static int tegra_i2c_iicbus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct tegra_i2c_softc *sc; int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); sc = device_get_softc(dev); LOCK(sc); tegra_i2c_setup_clk(sc, busfreq); UNLOCK(sc); return (0); } static int tegra_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_i2c_attach(device_t dev) { int rv, rid; phandle_t node; struct tegra_i2c_softc *sc; uint64_t freq; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* FDT resources. 
*/ rv = clk_get_by_ofw_name(dev, 0, "div-clk", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "i2c", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get i2c reset\n"); return (ENXIO); } rv = OF_getencprop(node, "clock-frequency", &sc->bus_freq, sizeof(sc->bus_freq)); if (rv != sizeof(sc->bus_freq)) { sc->bus_freq = 100000; } /* Request maximum frequency for I2C block 136MHz (408MHz / 3). */ rv = clk_set_freq(sc->clk, 136000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock frequency\n"); goto fail; } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } sc->core_freq = (uint32_t)freq; rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. */ rv = tegra_i2c_hw_init(sc); if (rv) { device_printf(dev, "tegra_i2c_activate failed\n"); goto fail; } /* Setup interrupt. */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_i2c_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* Attach the iicbus. */ sc->iicbus = device_add_child(dev, "iicbus", -1); if (sc->iicbus == NULL) { device_printf(dev, "Could not allocate iicbus instance.\n"); rv = ENXIO; goto fail; } /* Probe and attach the iicbus. 
*/ return (bus_generic_attach(dev)); fail: if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_i2c_detach(device_t dev) { struct tegra_i2c_softc *sc; sc = device_get_softc(dev); tegra_i2c_hw_init(sc); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); if (sc->iicbus) device_delete_child(dev, sc->iicbus); return (bus_generic_detach(dev)); } static phandle_t tegra_i2c_get_node(device_t bus, device_t dev) { /* Share controller node with iibus device. */ return (ofw_bus_get_node(bus)); } static device_method_t tegra_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_i2c_probe), DEVMETHOD(device_attach, tegra_i2c_attach), DEVMETHOD(device_detach, tegra_i2c_detach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, tegra_i2c_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, tegra_i2c_iicbus_reset), DEVMETHOD(iicbus_transfer, tegra_i2c_transfer), DEVMETHOD_END }; static DEFINE_CLASS_0(iichb, tegra_i2c_driver, 
tegra_i2c_methods, sizeof(struct tegra_i2c_softc)); EARLY_DRIVER_MODULE(tegra_iic, simplebus, tegra_i2c_driver, NULL, NULL, 73); diff --git a/sys/arm/nvidia/tegra_mc.c b/sys/arm/nvidia/tegra_mc.c index bad75f274af0..020c9617b453 100644 --- a/sys/arm/nvidia/tegra_mc.c +++ b/sys/arm/nvidia/tegra_mc.c @@ -1,307 +1,307 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Memory controller driver for Tegra SoCs. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_if.h" #define MC_INTSTATUS 0x000 #define MC_INTMASK 0x004 #define MC_INT_DECERR_MTS (1 << 16) #define MC_INT_SECERR_SEC (1 << 13) #define MC_INT_DECERR_VPR (1 << 12) #define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) #define MC_INT_INVALID_SMMU_PAGE (1 << 10) #define MC_INT_ARBITRATION_EMEM (1 << 9) #define MC_INT_SECURITY_VIOLATION (1 << 8) #define MC_INT_DECERR_EMEM (1 << 6) #define MC_INT_INT_MASK (MC_INT_DECERR_MTS | \ MC_INT_SECERR_SEC | \ MC_INT_DECERR_VPR | \ MC_INT_INVALID_APB_ASID_UPDATE | \ MC_INT_INVALID_SMMU_PAGE | \ MC_INT_ARBITRATION_EMEM | \ MC_INT_SECURITY_VIOLATION | \ MC_INT_DECERR_EMEM) #define MC_ERR_STATUS 0x008 #define MC_ERR_TYPE(x) (((x) >> 28) & 0x7) #define MC_ERR_TYPE_DECERR_EMEM 2 #define MC_ERR_TYPE_SECURITY_TRUSTZONE 3 #define MC_ERR_TYPE_SECURITY_CARVEOUT 4 #define MC_ERR_TYPE_INVALID_SMMU_PAGE 6 #define MC_ERR_INVALID_SMMU_PAGE_READABLE (1 << 27) #define MC_ERR_INVALID_SMMU_PAGE_WRITABLE (1 << 26) #define MC_ERR_INVALID_SMMU_PAGE_NONSECURE (1 << 25) #define MC_ERR_ADR_HI(x) (((x) >> 20) & 0x3) #define MC_ERR_SWAP (1 << 18) #define MC_ERR_SECURITY (1 << 17) #define MC_ERR_RW (1 << 16) #define MC_ERR_ADR1(x) (((x) >> 12) & 0x7) #define MC_ERR_ID(x) (((x) >> 0) & 07F) #define MC_ERR_ADDR 0x00C #define MC_EMEM_CFG 0x050 #define MC_EMEM_ADR_CFG 0x054 #define MC_EMEM_NUMDEV(x) (((x) >> 0 ) & 0x1) #define MC_EMEM_ADR_CFG_DEV0 0x058 #define MC_EMEM_ADR_CFG_DEV1 0x05C #define EMEM_DEV_DEVSIZE(x) (((x) >> 16) & 0xF) #define EMEM_DEV_BANKWIDTH(x) (((x) >> 8) & 0x3) #define EMEM_DEV_COLWIDTH(x) (((x) >> 8) & 0x3) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) mtx_sleep(sc, &sc->mtx, 0, "tegra_mc", 
timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_mc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-mc", 1}, {"nvidia,tegra210-mc", 1}, {NULL, 0} }; struct tegra_mc_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; clk_t clk; }; static char *smmu_err_tbl[16] = { "reserved", /* 0 */ "reserved", /* 1 */ "DRAM decode", /* 2 */ "Trustzome Security", /* 3 */ "Security carveout", /* 4 */ "reserved", /* 5 */ "Invalid SMMU page", /* 6 */ "reserved", /* 7 */ }; static void tegra_mc_intr(void *arg) { struct tegra_mc_softc *sc; uint32_t stat, err; uint64_t addr; sc = (struct tegra_mc_softc *)arg; stat = RD4(sc, MC_INTSTATUS); if ((stat & MC_INT_INT_MASK) == 0) { WR4(sc, MC_INTSTATUS, stat); return; } device_printf(sc->dev, "Memory Controller Interrupt:\n"); if (stat & MC_INT_DECERR_MTS) printf(" - MTS carveout violation\n"); if (stat & MC_INT_SECERR_SEC) printf(" - SEC carveout violation\n"); if (stat & MC_INT_DECERR_VPR) printf(" - VPR requirements violated\n"); if (stat & MC_INT_INVALID_APB_ASID_UPDATE) printf(" - ivalid APB ASID update\n"); if (stat & MC_INT_INVALID_SMMU_PAGE) printf(" - SMMU address translation error\n"); if (stat & MC_INT_ARBITRATION_EMEM) printf(" - arbitration deadlock-prevention threshold hit\n"); if (stat & MC_INT_SECURITY_VIOLATION) printf(" - SMMU address translation security error\n"); if (stat & MC_INT_DECERR_EMEM) printf(" - SMMU address decode error\n"); if ((stat & (MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM)) != 0) { err = RD4(sc, MC_ERR_STATUS); addr = RD4(sc, MC_ERR_STATUS); addr |= (uint64_t)(MC_ERR_ADR_HI(err)) << 32; printf(" at 0x%012jX [%s %s %s] - %s error.\n", (uintmax_t)addr, stat & MC_ERR_SWAP ? 
"Swap, " : "", stat & MC_ERR_SECURITY ? "Sec, " : "", stat & MC_ERR_RW ? "Write" : "Read", smmu_err_tbl[MC_ERR_TYPE(err)]); } WR4(sc, MC_INTSTATUS, stat); } static void tegra_mc_init_hw(struct tegra_mc_softc *sc) { /* Disable and acknowledge all interrupts */ WR4(sc, MC_INTMASK, 0); WR4(sc, MC_INTSTATUS, MC_INT_INT_MASK); } static int tegra_mc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra Memory Controller"); return (BUS_PROBE_DEFAULT); } static int tegra_mc_attach(device_t dev) { int rv, rid; struct tegra_mc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* OFW resources. */ rv = clk_get_by_ofw_name(dev, 0, "mc", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get mc clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. 
*/ tegra_mc_init_hw(sc); /* Setup interrupt */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_mc_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* Enable Interrupts */ WR4(sc, MC_INTMASK, MC_INT_INT_MASK); return (bus_generic_attach(dev)); fail: if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_mc_detach(device_t dev) { struct tegra_mc_softc *sc; sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } static device_method_t tegra_mc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_mc_probe), DEVMETHOD(device_attach, tegra_mc_attach), DEVMETHOD(device_detach, tegra_mc_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(mc, tegra_mc_driver, tegra_mc_methods, sizeof(struct tegra_mc_softc)); DRIVER_MODULE(tegra_mc, simplebus, tegra_mc_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra_pcie.c b/sys/arm/nvidia/tegra_pcie.c index 4ac557510b23..82e282146d68 100644 --- a/sys/arm/nvidia/tegra_pcie.c +++ b/sys/arm/nvidia/tegra_pcie.c @@ -1,1624 +1,1624 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Nvidia Integrated PCI/PCI-Express controller driver. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "msi_if.h" #include "pcib_if.h" #include "pic_if.h" #define AFI_AXI_BAR0_SZ 0x000 #define AFI_AXI_BAR1_SZ 0x004 #define AFI_AXI_BAR2_SZ 0x008 #define AFI_AXI_BAR3_SZ 0x00c #define AFI_AXI_BAR4_SZ 0x010 #define AFI_AXI_BAR5_SZ 0x014 #define AFI_AXI_BAR0_START 0x018 #define AFI_AXI_BAR1_START 0x01c #define AFI_AXI_BAR2_START 0x020 #define AFI_AXI_BAR3_START 0x024 #define AFI_AXI_BAR4_START 0x028 #define AFI_AXI_BAR5_START 0x02c #define AFI_FPCI_BAR0 0x030 #define AFI_FPCI_BAR1 0x034 #define AFI_FPCI_BAR2 0x038 #define AFI_FPCI_BAR3 0x03c #define AFI_FPCI_BAR4 0x040 #define AFI_FPCI_BAR5 0x044 #define AFI_MSI_BAR_SZ 0x060 #define AFI_MSI_FPCI_BAR_ST 0x064 #define AFI_MSI_AXI_BAR_ST 0x068 #define AFI_MSI_VEC(x) (0x06c + 4 * (x)) #define AFI_MSI_EN_VEC(x) (0x08c + 4 * (x)) #define AFI_MSI_INTR_IN_REG 32 #define AFI_MSI_REGS 8 #define AFI_CONFIGURATION 0x0ac #define AFI_CONFIGURATION_EN_FPCI (1 << 0) #define AFI_FPCI_ERROR_MASKS 0x0b0 #define AFI_INTR_MASK 0x0b4 #define AFI_INTR_MASK_MSI_MASK (1 << 8) #define AFI_INTR_MASK_INT_MASK (1 << 0) #define AFI_INTR_CODE 0x0b8 #define AFI_INTR_CODE_MASK 0xf #define AFI_INTR_CODE_INT_CODE_INI_SLVERR 1 #define AFI_INTR_CODE_INT_CODE_INI_DECERR 2 #define AFI_INTR_CODE_INT_CODE_TGT_SLVERR 3 #define AFI_INTR_CODE_INT_CODE_TGT_DECERR 4 #define AFI_INTR_CODE_INT_CODE_TGT_WRERR 5 #define AFI_INTR_CODE_INT_CODE_SM_MSG 6 #define AFI_INTR_CODE_INT_CODE_DFPCI_DECERR 7 #define AFI_INTR_CODE_INT_CODE_AXI_DECERR 8 #define AFI_INTR_CODE_INT_CODE_FPCI_TIMEOUT 9 #define AFI_INTR_CODE_INT_CODE_PE_PRSNT_SENSE 10 #define AFI_INTR_CODE_INT_CODE_PE_CLKREQ_SENSE 11 #define AFI_INTR_CODE_INT_CODE_CLKCLAMP_SENSE 12 #define 
AFI_INTR_CODE_INT_CODE_RDY4PD_SENSE 13 #define AFI_INTR_CODE_INT_CODE_P2P_ERROR 14 #define AFI_INTR_SIGNATURE 0x0bc #define AFI_UPPER_FPCI_ADDRESS 0x0c0 #define AFI_SM_INTR_ENABLE 0x0c4 #define AFI_SM_INTR_RP_DEASSERT (1 << 14) #define AFI_SM_INTR_RP_ASSERT (1 << 13) #define AFI_SM_INTR_HOTPLUG (1 << 12) #define AFI_SM_INTR_PME (1 << 11) #define AFI_SM_INTR_FATAL_ERROR (1 << 10) #define AFI_SM_INTR_UNCORR_ERROR (1 << 9) #define AFI_SM_INTR_CORR_ERROR (1 << 8) #define AFI_SM_INTR_INTD_DEASSERT (1 << 7) #define AFI_SM_INTR_INTC_DEASSERT (1 << 6) #define AFI_SM_INTR_INTB_DEASSERT (1 << 5) #define AFI_SM_INTR_INTA_DEASSERT (1 << 4) #define AFI_SM_INTR_INTD_ASSERT (1 << 3) #define AFI_SM_INTR_INTC_ASSERT (1 << 2) #define AFI_SM_INTR_INTB_ASSERT (1 << 1) #define AFI_SM_INTR_INTA_ASSERT (1 << 0) #define AFI_AFI_INTR_ENABLE 0x0c8 #define AFI_AFI_INTR_ENABLE_CODE(code) (1 << (code)) #define AFI_PCIE_CONFIG 0x0f8 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0x6 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR2_1 (0x0 << 20) #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR4_1 (0x1 << 20) #define AFI_FUSE 0x104 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) #define AFI_PEX0_CTRL 0x110 #define AFI_PEX1_CTRL 0x118 #define AFI_PEX2_CTRL 0x128 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) #define AFI_PEX_CTRL_RST_L (1 << 0) #define AFI_AXI_BAR6_SZ 0x134 #define AFI_AXI_BAR7_SZ 0x138 #define AFI_AXI_BAR8_SZ 0x13c #define AFI_AXI_BAR6_START 0x140 #define AFI_AXI_BAR7_START 0x144 #define AFI_AXI_BAR8_START 0x148 #define AFI_FPCI_BAR6 0x14c #define AFI_FPCI_BAR7 0x150 #define AFI_FPCI_BAR8 0x154 #define AFI_PLLE_CONTROL 0x160 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) #define AFI_PLLE_CONTROL_BYPASS_PCIE2PLLE_CONTROL (1 << 8) #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) 
#define AFI_PLLE_CONTROL_PCIE2PLLE_CONTROL_EN (1 << 0) #define AFI_PEXBIAS_CTRL 0x168 /* Configuration space */ #define RP_VEND_XP 0x0F00 #define RP_VEND_XP_DL_UP (1 << 30) #define RP_VEND_CTL2 0x0fa8 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7) #define RP_PRIV_MISC 0x0FE0 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0) #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0) #define RP_LINK_CONTROL_STATUS 0x0090 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 /* PADS space */ #define PADS_REFCLK_CFG0 0x000c8 #define PADS_REFCLK_CFG1 0x000cc /* Wait 50 ms (per port) for link. */ #define TEGRA_PCIE_LINKUP_TIMEOUT 50000 /* FPCI Address space */ #define FPCI_MAP_IO 0xFDFC000000ULL #define FPCI_MAP_TYPE0_CONFIG 0xFDFC000000ULL #define FPCI_MAP_TYPE1_CONFIG 0xFDFF000000ULL #define FPCI_MAP_EXT_TYPE0_CONFIG 0xFE00000000ULL #define FPCI_MAP_EXT_TYPE1_CONFIG 0xFE10000000ULL #define TEGRA_PCIB_MSI_ENABLE #define DEBUG #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
#endif /* * Configuration space format: * [27:24] extended register * [23:16] bus * [15:11] slot (device) * [10: 8] function * [ 7: 0] register */ #define PCI_CFG_EXT_REG(reg) ((((reg) >> 8) & 0x0f) << 24) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x07) << 8) #define PCI_CFG_BASE_REG(reg) ((reg) & 0xff) #define PADS_WR4(_sc, _r, _v) bus_write_4((_sc)->pads_mem_res, (_r), (_v)) #define PADS_RD4(_sc, _r) bus_read_4((_sc)->pads_mem_res, (_r)) #define AFI_WR4(_sc, _r, _v) bus_write_4((_sc)->afi_mem_res, (_r), (_v)) #define AFI_RD4(_sc, _r) bus_read_4((_sc)->afi_mem_res, (_r)) static struct { bus_size_t axi_start; bus_size_t fpci_start; bus_size_t size; } bars[] = { {AFI_AXI_BAR0_START, AFI_FPCI_BAR0, AFI_AXI_BAR0_SZ}, /* BAR 0 */ {AFI_AXI_BAR1_START, AFI_FPCI_BAR1, AFI_AXI_BAR1_SZ}, /* BAR 1 */ {AFI_AXI_BAR2_START, AFI_FPCI_BAR2, AFI_AXI_BAR2_SZ}, /* BAR 2 */ {AFI_AXI_BAR3_START, AFI_FPCI_BAR3, AFI_AXI_BAR3_SZ}, /* BAR 3 */ {AFI_AXI_BAR4_START, AFI_FPCI_BAR4, AFI_AXI_BAR4_SZ}, /* BAR 4 */ {AFI_AXI_BAR5_START, AFI_FPCI_BAR5, AFI_AXI_BAR5_SZ}, /* BAR 5 */ {AFI_AXI_BAR6_START, AFI_FPCI_BAR6, AFI_AXI_BAR6_SZ}, /* BAR 6 */ {AFI_AXI_BAR7_START, AFI_FPCI_BAR7, AFI_AXI_BAR7_SZ}, /* BAR 7 */ {AFI_AXI_BAR8_START, AFI_FPCI_BAR8, AFI_AXI_BAR8_SZ}, /* BAR 8 */ {AFI_MSI_AXI_BAR_ST, AFI_MSI_FPCI_BAR_ST, AFI_MSI_BAR_SZ}, /* MSI 9 */ }; struct pcie_soc { char **regulator_names; bool cml_clk; bool pca_enable; uint32_t pads_refclk_cfg0; uint32_t pads_refclk_cfg1; }; /* Tegra 124 config. */ static char *tegra124_reg_names[] = { "avddio-pex-supply", "dvddio-pex-supply", "avdd-pex-pll-supply", "hvdd-pex-supply", "hvdd-pex-pll-e-supply", "vddio-pex-ctl-supply", "avdd-pll-erefe-supply", NULL }; static struct pcie_soc tegra124_soc = { .regulator_names = tegra124_reg_names, .cml_clk = true, .pca_enable = false, .pads_refclk_cfg0 = 0x44ac44ac, }; /* Tegra 210 config. 
*/ static char *tegra210_reg_names[] = { "avdd-pll-uerefe-supply", "hvddio-pex-supply", "dvddio-pex-supply", "dvdd-pex-pll-supply", "hvdd-pex-pll-e-supply", "vddio-pex-ctl-supply", NULL }; static struct pcie_soc tegra210_soc = { .regulator_names = tegra210_reg_names, .cml_clk = true, .pca_enable = true, .pads_refclk_cfg0 = 0x90b890b8, }; /* Compatible devices. */ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-pcie", (uintptr_t)&tegra124_soc}, {"nvidia,tegra210-pcie", (uintptr_t)&tegra210_soc}, {NULL, 0}, }; #define TEGRA_FLAG_MSI_USED 0x0001 struct tegra_pcib_irqsrc { struct intr_irqsrc isrc; u_int irq; u_int flags; }; struct tegra_pcib_port { int enabled; int port_idx; /* chip port index */ int num_lanes; /* number of lanes */ bus_size_t afi_pex_ctrl; /* offset of afi_pex_ctrl */ phy_t phy; /* port phy */ /* Config space properties. */ bus_addr_t rp_base_addr; /* PA of config window */ bus_size_t rp_size; /* size of config window */ bus_space_handle_t cfg_handle; /* handle of config window */ }; #define TEGRA_PCIB_MAX_PORTS 3 #define TEGRA_PCIB_MAX_MSI AFI_MSI_INTR_IN_REG * AFI_MSI_REGS struct tegra_pcib_softc { struct ofw_pci_softc ofw_pci; device_t dev; struct pcie_soc *soc; struct mtx mtx; struct resource *pads_mem_res; struct resource *afi_mem_res; struct resource *cfg_mem_res; struct resource *irq_res; struct resource *msi_irq_res; void *intr_cookie; void *msi_intr_cookie; struct ofw_pci_range mem_range; struct ofw_pci_range pref_mem_range; struct ofw_pci_range io_range; clk_t clk_pex; clk_t clk_afi; clk_t clk_pll_e; clk_t clk_cml; hwreset_t hwreset_pex; hwreset_t hwreset_afi; hwreset_t hwreset_pcie_x; regulator_t regulators[16]; /* Safe maximum */ vm_offset_t msi_page; /* VA of MSI page */ bus_addr_t cfg_base_addr; /* base address of config */ bus_size_t cfg_cur_offs; /* currently mapped window */ bus_space_handle_t cfg_handle; /* handle of config window */ bus_space_tag_t bus_tag; /* tag of config window */ int lanes_cfg; int num_ports; 
struct tegra_pcib_port *ports[TEGRA_PCIB_MAX_PORTS]; struct tegra_pcib_irqsrc *isrcs; }; static int tegra_pcib_maxslots(device_t dev) { return (16); } static int tegra_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct tegra_pcib_softc *sc; u_int irq; sc = device_get_softc(bus); irq = intr_map_clone_irq(rman_get_start(sc->irq_res)); device_printf(bus, "route pin %d for device %d.%d to %u\n", pin, pci_get_slot(dev), pci_get_function(dev), irq); return (irq); } static int tegra_pcbib_map_cfg(struct tegra_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg) { bus_size_t offs; int flags, rv; offs = sc->cfg_base_addr; offs |= PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_EXT_REG(reg); if ((sc->cfg_handle != 0) && (sc->cfg_cur_offs == offs)) return (0); if (sc->cfg_handle != 0) bus_space_unmap(sc->bus_tag, sc->cfg_handle, 0x800); #if defined(BUS_SPACE_MAP_NONPOSTED) flags = BUS_SPACE_MAP_NONPOSTED; #else flags = 0; #endif rv = bus_space_map(sc->bus_tag, offs, 0x800, flags, &sc->cfg_handle); if (rv != 0) device_printf(sc->dev, "Cannot map config space\n"); else sc->cfg_cur_offs = offs; return (rv); } static uint32_t tegra_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct tegra_pcib_softc *sc; bus_space_handle_t hndl; uint32_t off; uint32_t val; int rv, i; sc = device_get_softc(dev); if (bus == 0) { if (func != 0) return (0xFFFFFFFF); for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if ((sc->ports[i] != NULL) && (sc->ports[i]->port_idx == slot)) { hndl = sc->ports[i]->cfg_handle; off = reg & 0xFFF; break; } } if (i >= TEGRA_PCIB_MAX_PORTS) return (0xFFFFFFFF); } else { rv = tegra_pcbib_map_cfg(sc, bus, slot, func, reg); if (rv != 0) return (0xFFFFFFFF); hndl = sc->cfg_handle; off = PCI_CFG_BASE_REG(reg); } val = bus_space_read_4(sc->bus_tag, hndl, off & ~3); switch (bytes) { case 4: break; case 2: if (off & 3) val >>= 16; val &= 0xffff; break; case 1: val >>= ((off & 3) << 3); val &= 
0xff; break; } return val; } static void tegra_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct tegra_pcib_softc *sc; bus_space_handle_t hndl; uint32_t off; uint32_t val2; int rv, i; sc = device_get_softc(dev); if (bus == 0) { if (func != 0) return; for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if ((sc->ports[i] != NULL) && (sc->ports[i]->port_idx == slot)) { hndl = sc->ports[i]->cfg_handle; off = reg & 0xFFF; break; } } if (i >= TEGRA_PCIB_MAX_PORTS) return; } else { rv = tegra_pcbib_map_cfg(sc, bus, slot, func, reg); if (rv != 0) return; hndl = sc->cfg_handle; off = PCI_CFG_BASE_REG(reg); } switch (bytes) { case 4: bus_space_write_4(sc->bus_tag, hndl, off, val); break; case 2: val2 = bus_space_read_4(sc->bus_tag, hndl, off & ~3); val2 &= ~(0xffff << ((off & 3) << 3)); val2 |= ((val & 0xffff) << ((off & 3) << 3)); bus_space_write_4(sc->bus_tag, hndl, off & ~3, val2); break; case 1: val2 = bus_space_read_4(sc->bus_tag, hndl, off & ~3); val2 &= ~(0xff << ((off & 3) << 3)); val2 |= ((val & 0xff) << ((off & 3) << 3)); bus_space_write_4(sc->bus_tag, hndl, off & ~3, val2); break; } } static int tegra_pci_intr(void *arg) { struct tegra_pcib_softc *sc = arg; uint32_t code, signature; code = bus_read_4(sc->afi_mem_res, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; signature = bus_read_4(sc->afi_mem_res, AFI_INTR_SIGNATURE); bus_write_4(sc->afi_mem_res, AFI_INTR_CODE, 0); if (code == AFI_INTR_CODE_INT_CODE_SM_MSG) return(FILTER_STRAY); printf("tegra_pci_intr: code %x sig %x\n", code, signature); return (FILTER_HANDLED); } /* ----------------------------------------------------------------------- * * PCI MSI interface */ static int tegra_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { phandle_t msi_parent; /* XXXX ofw_bus_msimap() don't works for Tegra DT. 
ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); */ msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); return (intr_alloc_msi(pci, child, msi_parent, count, maxcount, irqs)); } static int tegra_pcib_release_msi(device_t pci, device_t child, int count, int *irqs) { phandle_t msi_parent; /* XXXX ofw_bus_msimap() don't works for Tegra DT. ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); */ msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); return (intr_release_msi(pci, child, msi_parent, count, irqs)); } static int tegra_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { phandle_t msi_parent; /* XXXX ofw_bus_msimap() don't works for Tegra DT. ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); */ msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); return (intr_map_msi(pci, child, msi_parent, irq, addr, data)); } #ifdef TEGRA_PCIB_MSI_ENABLE /* -------------------------------------------------------------------------- * * Interrupts * */ static inline void tegra_pcib_isrc_mask(struct tegra_pcib_softc *sc, struct tegra_pcib_irqsrc *tgi, uint32_t val) { uint32_t reg; int offs, bit; offs = tgi->irq / AFI_MSI_INTR_IN_REG; bit = 1 << (tgi->irq % AFI_MSI_INTR_IN_REG); if (val != 0) AFI_WR4(sc, AFI_MSI_VEC(offs), bit); reg = AFI_RD4(sc, AFI_MSI_EN_VEC(offs)); if (val != 0) reg |= bit; else reg &= ~bit; AFI_WR4(sc, AFI_MSI_EN_VEC(offs), reg); } static int tegra_pcib_msi_intr(void *arg) { u_int irq, i, bit, reg; struct tegra_pcib_softc *sc; struct trapframe *tf; struct tegra_pcib_irqsrc *tgi; sc = (struct tegra_pcib_softc *)arg; tf = curthread->td_intr_frame; for (i = 0; i < AFI_MSI_REGS; i++) { reg = AFI_RD4(sc, AFI_MSI_VEC(i)); /* Handle one vector. 
*/ while (reg != 0) { bit = ffs(reg) - 1; /* Send EOI */ AFI_WR4(sc, AFI_MSI_VEC(i), 1 << bit); irq = i * AFI_MSI_INTR_IN_REG + bit; tgi = &sc->isrcs[irq]; if (intr_isrc_dispatch(&tgi->isrc, tf) != 0) { /* Disable stray. */ tegra_pcib_isrc_mask(sc, tgi, 0); device_printf(sc->dev, "Stray irq %u disabled\n", irq); } reg = AFI_RD4(sc, AFI_MSI_VEC(i)); } } return (FILTER_HANDLED); } static int tegra_pcib_msi_attach(struct tegra_pcib_softc *sc) { int error; uint32_t irq; const char *name; sc->isrcs = malloc(sizeof(*sc->isrcs) * TEGRA_PCIB_MAX_MSI, M_DEVBUF, M_WAITOK | M_ZERO); name = device_get_nameunit(sc->dev); for (irq = 0; irq < TEGRA_PCIB_MAX_MSI; irq++) { sc->isrcs[irq].irq = irq; error = intr_isrc_register(&sc->isrcs[irq].isrc, sc->dev, 0, "%s,%u", name, irq); if (error != 0) return (error); /* XXX deregister ISRCs */ } if (intr_msi_register(sc->dev, OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0) return (ENXIO); return (0); } static int tegra_pcib_msi_detach(struct tegra_pcib_softc *sc) { /* * There has not been established any procedure yet * how to detach PIC from living system correctly. 
*/ device_printf(sc->dev, "%s: not implemented yet\n", __func__); return (EBUSY); } static void tegra_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct tegra_pcib_softc *sc; struct tegra_pcib_irqsrc *tgi; sc = device_get_softc(dev); tgi = (struct tegra_pcib_irqsrc *)isrc; tegra_pcib_isrc_mask(sc, tgi, 0); } static void tegra_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct tegra_pcib_softc *sc; struct tegra_pcib_irqsrc *tgi; sc = device_get_softc(dev); tgi = (struct tegra_pcib_irqsrc *)isrc; tegra_pcib_isrc_mask(sc, tgi, 1); } /* MSI interrupts are edge trigered -> do nothing */ static void tegra_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc) { } static void tegra_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc) { } static void tegra_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { } static int tegra_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { if (data == NULL || data->type != INTR_MAP_DATA_MSI) return (ENOTSUP); if (isrc->isrc_handlers == 0) tegra_pcib_msi_enable_intr(dev, isrc); return (0); } static int tegra_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct tegra_pcib_softc *sc; struct tegra_pcib_irqsrc *tgi; sc = device_get_softc(dev); tgi = (struct tegra_pcib_irqsrc *)isrc; if (isrc->isrc_handlers == 0) tegra_pcib_isrc_mask(sc, tgi, 0); return (0); } static int tegra_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **srcs) { struct tegra_pcib_softc *sc; int i, irq, end_irq; bool found; KASSERT(powerof2(count), ("%s: bad count", __func__)); KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__)); sc = device_get_softc(dev); mtx_lock(&sc->mtx); found = false; for (irq = 0; (irq + count - 1) < TEGRA_PCIB_MAX_MSI; irq++) { /* Start on an aligned interrupt */ if ((irq & (maxcount - 
1)) != 0) continue; /* Assume we found a valid range until shown otherwise */ found = true; /* Check this range is valid */ for (end_irq = irq; end_irq < irq + count; end_irq++) { /* This is already used */ if ((sc->isrcs[end_irq].flags & TEGRA_FLAG_MSI_USED) == TEGRA_FLAG_MSI_USED) { found = false; break; } } if (found) break; } /* Not enough interrupts were found */ if (!found || irq == (TEGRA_PCIB_MAX_MSI - 1)) { mtx_unlock(&sc->mtx); return (ENXIO); } for (i = 0; i < count; i++) { /* Mark the interrupt as used */ sc->isrcs[irq + i].flags |= TEGRA_FLAG_MSI_USED; } mtx_unlock(&sc->mtx); for (i = 0; i < count; i++) srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i]; *pic = device_get_parent(dev); return (0); } static int tegra_pcib_msi_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { struct tegra_pcib_softc *sc; struct tegra_pcib_irqsrc *ti; int i; sc = device_get_softc(dev); mtx_lock(&sc->mtx); for (i = 0; i < count; i++) { ti = (struct tegra_pcib_irqsrc *)isrc[i]; KASSERT((ti->flags & TEGRA_FLAG_MSI_USED) == TEGRA_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X interrupt", __func__)); ti->flags &= ~TEGRA_FLAG_MSI_USED; } mtx_unlock(&sc->mtx); return (0); } static int tegra_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct tegra_pcib_softc *sc = device_get_softc(dev); struct tegra_pcib_irqsrc *ti = (struct tegra_pcib_irqsrc *)isrc; *addr = vtophys(sc->msi_page); *data = ti->irq; return (0); } #endif /* ------------------------------------------------------------------- */ static bus_size_t tegra_pcib_pex_ctrl(struct tegra_pcib_softc *sc, int port) { switch (port) { case 0: return (AFI_PEX0_CTRL); case 1: return (AFI_PEX1_CTRL); case 2: return (AFI_PEX2_CTRL); default: panic("invalid port number: %d\n", port); } } static int tegra_pcib_enable_fdt_resources(struct tegra_pcib_softc *sc) { int i, rv; rv = hwreset_assert(sc->hwreset_pcie_x); if (rv != 0) { 
device_printf(sc->dev, "Cannot assert 'pcie_x' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_afi); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'afi' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_pex); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'pex' reset\n"); return (rv); } tegra_powergate_power_off(TEGRA_POWERGATE_PCX); /* Regulators. */ for (i = 0; i < nitems(sc->regulators); i++) { if (sc->regulators[i] == NULL) continue; rv = regulator_enable(sc->regulators[i]); if (rv != 0) { device_printf(sc->dev, "Cannot enable '%s' regulator\n", sc->soc->regulator_names[i]); return (rv); } } rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCX, sc->clk_pex, sc->hwreset_pex); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'PCX' powergate\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_afi); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'afi' reset\n"); return (rv); } rv = clk_enable(sc->clk_afi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'afi' clock\n"); return (rv); } if (sc->soc->cml_clk) { rv = clk_enable(sc->clk_cml); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'cml' clock\n"); return (rv); } } rv = clk_enable(sc->clk_pll_e); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pll_e' clock\n"); return (rv); } return (0); } static struct tegra_pcib_port * tegra_pcib_parse_port(struct tegra_pcib_softc *sc, phandle_t node) { struct tegra_pcib_port *port; uint32_t tmp[5]; char tmpstr[6]; int rv; port = malloc(sizeof(struct tegra_pcib_port), M_DEVBUF, M_WAITOK); rv = OF_getprop(node, "status", tmpstr, sizeof(tmpstr)); if (rv <= 0 || strcmp(tmpstr, "okay") == 0 || strcmp(tmpstr, "ok") == 0) port->enabled = 1; else port->enabled = 0; rv = OF_getencprop(node, "assigned-addresses", tmp, sizeof(tmp)); if (rv != sizeof(tmp)) { device_printf(sc->dev, "Cannot parse assigned-address: %d\n", rv); goto fail; } port->rp_base_addr = tmp[2]; port->rp_size = tmp[4]; port->port_idx = 
OFW_PCI_PHYS_HI_DEVICE(tmp[0]) - 1; if (port->port_idx >= TEGRA_PCIB_MAX_PORTS) { device_printf(sc->dev, "Invalid port index: %d\n", port->port_idx); goto fail; } /* XXX - TODO: * Implement proper function for parsing pci "reg" property: * - it have PCI bus format * - its relative to matching "assigned-addresses" */ rv = OF_getencprop(node, "reg", tmp, sizeof(tmp)); if (rv != sizeof(tmp)) { device_printf(sc->dev, "Cannot parse reg: %d\n", rv); goto fail; } port->rp_base_addr += tmp[2]; rv = OF_getencprop(node, "nvidia,num-lanes", &port->num_lanes, sizeof(port->num_lanes)); if (rv != sizeof(port->num_lanes)) { device_printf(sc->dev, "Cannot parse nvidia,num-lanes: %d\n", rv); goto fail; } if (port->num_lanes > 4) { device_printf(sc->dev, "Invalid nvidia,num-lanes: %d\n", port->num_lanes); goto fail; } port->afi_pex_ctrl = tegra_pcib_pex_ctrl(sc, port->port_idx); sc->lanes_cfg |= port->num_lanes << (4 * port->port_idx); /* Phy. */ rv = phy_get_by_ofw_name(sc->dev, node, "pcie-0", &port->phy); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pcie-0' phy for port %d\n", port->port_idx); goto fail; } return (port); fail: free(port, M_DEVBUF); return (NULL); } static int tegra_pcib_parse_fdt_resources(struct tegra_pcib_softc *sc, phandle_t node) { phandle_t child; struct tegra_pcib_port *port; int i, rv; /* Regulators. */ for (i = 0; sc->soc->regulator_names[i] != NULL; i++) { if (i >= nitems(sc->regulators)) { device_printf(sc->dev, "Too many regulators present in DT.\n"); return (EOVERFLOW); } rv = regulator_get_by_ofw_property(sc->dev, 0, sc->soc->regulator_names[i], sc->regulators + i); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' regulator\n", sc->soc->regulator_names[i]); return (ENXIO); } } /* Resets. 
*/ rv = hwreset_get_by_ofw_name(sc->dev, 0, "pex", &sc->hwreset_pex); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pex' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "afi", &sc->hwreset_afi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'afi' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "pcie_x", &sc->hwreset_pcie_x); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pcie_x' reset\n"); return (ENXIO); } /* Clocks. */ rv = clk_get_by_ofw_name(sc->dev, 0, "pex", &sc->clk_pex); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pex' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "afi", &sc->clk_afi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'afi' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "pll_e", &sc->clk_pll_e); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll_e' clock\n"); return (ENXIO); } if (sc->soc->cml_clk) { rv = clk_get_by_ofw_name(sc->dev, 0, "cml", &sc->clk_cml); if (rv != 0) { device_printf(sc->dev, "Cannot get 'cml' clock\n"); return (ENXIO); } } /* Ports */ sc->num_ports = 0; for (child = OF_child(node); child != 0; child = OF_peer(child)) { port = tegra_pcib_parse_port(sc, child); if (port == NULL) { device_printf(sc->dev, "Cannot parse PCIe port node\n"); return (ENXIO); } sc->ports[sc->num_ports++] = port; } return (0); } static int tegra_pcib_decode_ranges(struct tegra_pcib_softc *sc, struct ofw_pci_range *ranges, int nranges) { int i; for (i = 2; i < nranges; i++) { if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) == OFW_PCI_PHYS_HI_SPACE_IO) { if (sc->io_range.size != 0) { device_printf(sc->dev, "Duplicated IO range found in DT\n"); return (ENXIO); } sc->io_range = ranges[i]; } if (((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) == OFW_PCI_PHYS_HI_SPACE_MEM32)) { if (ranges[i].pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) { if (sc->pref_mem_range.size != 0) { device_printf(sc->dev, "Duplicated memory range found " "in DT\n"); return 
(ENXIO); } sc->pref_mem_range = ranges[i]; } else { if (sc->mem_range.size != 0) { device_printf(sc->dev, "Duplicated memory range found " "in DT\n"); return (ENXIO); } sc->mem_range = ranges[i]; } } } if ((sc->io_range.size == 0) || (sc->mem_range.size == 0) || (sc->pref_mem_range.size == 0)) { device_printf(sc->dev, " Not all required ranges are found in DT\n"); return (ENXIO); } return (0); } /* * Hardware config. */ static int tegra_pcib_wait_for_link(struct tegra_pcib_softc *sc, struct tegra_pcib_port *port) { uint32_t reg; int i; /* Setup link detection. */ reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0, RP_PRIV_MISC, 4); reg &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; reg |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; tegra_pcib_write_config(sc->dev, 0, port->port_idx, 0, RP_PRIV_MISC, reg, 4); for (i = TEGRA_PCIE_LINKUP_TIMEOUT; i > 0; i--) { reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0, RP_VEND_XP, 4); if (reg & RP_VEND_XP_DL_UP) break; DELAY(1); } if (i <= 0) return (ETIMEDOUT); for (i = TEGRA_PCIE_LINKUP_TIMEOUT; i > 0; i--) { reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0, RP_LINK_CONTROL_STATUS, 4); if (reg & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) break; DELAY(1); } if (i <= 0) return (ETIMEDOUT); return (0); } static void tegra_pcib_port_enable(struct tegra_pcib_softc *sc, int port_num) { struct tegra_pcib_port *port; uint32_t reg; int rv; port = sc->ports[port_num]; /* Put port to reset. */ reg = AFI_RD4(sc, port->afi_pex_ctrl); reg &= ~AFI_PEX_CTRL_RST_L; AFI_WR4(sc, port->afi_pex_ctrl, reg); AFI_RD4(sc, port->afi_pex_ctrl); DELAY(10); /* Enable clocks. */ reg |= AFI_PEX_CTRL_REFCLK_EN; reg |= AFI_PEX_CTRL_CLKREQ_EN; reg |= AFI_PEX_CTRL_OVERRIDE_EN; AFI_WR4(sc, port->afi_pex_ctrl, reg); AFI_RD4(sc, port->afi_pex_ctrl); DELAY(100); /* Release reset. 
*/ reg |= AFI_PEX_CTRL_RST_L; AFI_WR4(sc, port->afi_pex_ctrl, reg); if (sc->soc->pca_enable) { reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0, RP_VEND_CTL2, 4); reg |= RP_VEND_CTL2_PCA_ENABLE; tegra_pcib_write_config(sc->dev, 0, port->port_idx, 0, RP_VEND_CTL2, reg, 4); } rv = tegra_pcib_wait_for_link(sc, port); if (bootverbose) device_printf(sc->dev, " port %d (%d lane%s): Link is %s\n", port->port_idx, port->num_lanes, port->num_lanes > 1 ? "s": "", rv == 0 ? "up": "down"); } static void tegra_pcib_port_disable(struct tegra_pcib_softc *sc, uint32_t port_num) { struct tegra_pcib_port *port; uint32_t reg; port = sc->ports[port_num]; /* Put port to reset. */ reg = AFI_RD4(sc, port->afi_pex_ctrl); reg &= ~AFI_PEX_CTRL_RST_L; AFI_WR4(sc, port->afi_pex_ctrl, reg); AFI_RD4(sc, port->afi_pex_ctrl); DELAY(10); /* Disable clocks. */ reg &= ~AFI_PEX_CTRL_CLKREQ_EN; reg &= ~AFI_PEX_CTRL_REFCLK_EN; AFI_WR4(sc, port->afi_pex_ctrl, reg); if (bootverbose) device_printf(sc->dev, " port %d (%d lane%s): Disabled\n", port->port_idx, port->num_lanes, port->num_lanes > 1 ? "s": ""); } static void tegra_pcib_set_bar(struct tegra_pcib_softc *sc, int bar, uint32_t axi, uint64_t fpci, uint32_t size, int is_memory) { uint32_t fpci_reg; uint32_t axi_reg; uint32_t size_reg; axi_reg = axi & ~0xFFF; size_reg = size >> 12; fpci_reg = (uint32_t)(fpci >> 8) & ~0xF; fpci_reg |= is_memory ? 0x1 : 0x0; AFI_WR4(sc, bars[bar].axi_start, axi_reg); AFI_WR4(sc, bars[bar].size, size_reg); AFI_WR4(sc, bars[bar].fpci_start, fpci_reg); } static int tegra_pcib_enable(struct tegra_pcib_softc *sc) { int rv; int i; uint32_t reg; rv = tegra_pcib_enable_fdt_resources(sc); if (rv != 0) { device_printf(sc->dev, "Cannot enable FDT resources\n"); return (rv); } /* Enable PLLE control. */ reg = AFI_RD4(sc, AFI_PLLE_CONTROL); reg &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL; reg |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN; AFI_WR4(sc, AFI_PLLE_CONTROL, reg); /* Set bias pad. 
*/ AFI_WR4(sc, AFI_PEXBIAS_CTRL, 0); /* Configure mode and ports. */ reg = AFI_RD4(sc, AFI_PCIE_CONFIG); reg &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; if (sc->lanes_cfg == 0x14) { if (bootverbose) device_printf(sc->dev, "Using x1,x4 configuration\n"); reg |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR4_1; } else if (sc->lanes_cfg == 0x12) { if (bootverbose) device_printf(sc->dev, "Using x1,x2 configuration\n"); reg |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR2_1; } else { device_printf(sc->dev, "Unsupported lanes configuration: 0x%X\n", sc->lanes_cfg); } reg |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL; for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if ((sc->ports[i] != NULL)) reg &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(sc->ports[i]->port_idx); } AFI_WR4(sc, AFI_PCIE_CONFIG, reg); /* Enable Gen2 support. */ reg = AFI_RD4(sc, AFI_FUSE); reg &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; AFI_WR4(sc, AFI_FUSE, reg); for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if (sc->ports[i] != NULL) { rv = phy_enable(sc->ports[i]->phy); if (rv != 0) { device_printf(sc->dev, "Cannot enable phy for port %d\n", sc->ports[i]->port_idx); return (rv); } } } /* Configure PCIe reference clock */ PADS_WR4(sc, PADS_REFCLK_CFG0, sc->soc->pads_refclk_cfg0); if (sc->num_ports > 2) PADS_WR4(sc, PADS_REFCLK_CFG1, sc->soc->pads_refclk_cfg1); rv = hwreset_deassert(sc->hwreset_pcie_x); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'pci_x' reset\n"); return (rv); } /* Enable config space. */ reg = AFI_RD4(sc, AFI_CONFIGURATION); reg |= AFI_CONFIGURATION_EN_FPCI; AFI_WR4(sc, AFI_CONFIGURATION, reg); /* Enable AFI errors. 
*/ reg = 0; reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_INI_SLVERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_INI_DECERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_SLVERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_DECERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_WRERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_SM_MSG); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_DFPCI_DECERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_AXI_DECERR); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_FPCI_TIMEOUT); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_PE_PRSNT_SENSE); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_PE_CLKREQ_SENSE); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_CLKCLAMP_SENSE); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_RDY4PD_SENSE); reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_P2P_ERROR); AFI_WR4(sc, AFI_AFI_INTR_ENABLE, reg); AFI_WR4(sc, AFI_SM_INTR_ENABLE, 0xffffffff); /* Enable INT, disable MSI. */ AFI_WR4(sc, AFI_INTR_MASK, AFI_INTR_MASK_INT_MASK); /* Mask all FPCI errors. */ AFI_WR4(sc, AFI_FPCI_ERROR_MASKS, 0); /* Setup AFI translation windows. */ /* BAR 0 - type 1 extended configuration. */ tegra_pcib_set_bar(sc, 0, rman_get_start(sc->cfg_mem_res), FPCI_MAP_EXT_TYPE1_CONFIG, rman_get_size(sc->cfg_mem_res), 0); /* BAR 1 - downstream I/O. */ tegra_pcib_set_bar(sc, 1, sc->io_range.host, FPCI_MAP_IO, sc->io_range.size, 0); /* BAR 2 - downstream prefetchable memory 1:1. */ tegra_pcib_set_bar(sc, 2, sc->pref_mem_range.host, sc->pref_mem_range.host, sc->pref_mem_range.size, 1); /* BAR 3 - downstream not prefetchable memory 1:1 .*/ tegra_pcib_set_bar(sc, 3, sc->mem_range.host, sc->mem_range.host, sc->mem_range.size, 1); /* BAR 3-8 clear. 
*/ tegra_pcib_set_bar(sc, 4, 0, 0, 0, 0); tegra_pcib_set_bar(sc, 5, 0, 0, 0, 0); tegra_pcib_set_bar(sc, 6, 0, 0, 0, 0); tegra_pcib_set_bar(sc, 7, 0, 0, 0, 0); tegra_pcib_set_bar(sc, 8, 0, 0, 0, 0); /* MSI BAR - clear. */ tegra_pcib_set_bar(sc, 9, 0, 0, 0, 0); return(0); } #ifdef TEGRA_PCIB_MSI_ENABLE static int tegra_pcib_attach_msi(device_t dev) { struct tegra_pcib_softc *sc; uint32_t reg; int i, rv; sc = device_get_softc(dev); sc->msi_page = (uintptr_t)kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); /* MSI BAR */ tegra_pcib_set_bar(sc, 9, vtophys(sc->msi_page), vtophys(sc->msi_page), PAGE_SIZE, 0); /* Disable and clear all interrupts. */ for (i = 0; i < AFI_MSI_REGS; i++) { AFI_WR4(sc, AFI_MSI_EN_VEC(i), 0); AFI_WR4(sc, AFI_MSI_VEC(i), 0xFFFFFFFF); } rv = bus_setup_intr(dev, sc->msi_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, tegra_pcib_msi_intr, NULL, sc, &sc->msi_intr_cookie); if (rv != 0) { device_printf(dev, "cannot setup MSI interrupt handler\n"); rv = ENXIO; goto out; } if (tegra_pcib_msi_attach(sc) != 0) { device_printf(dev, "WARNING: unable to attach PIC\n"); tegra_pcib_msi_detach(sc); goto out; } /* Unmask MSI interrupt. 
*/ reg = AFI_RD4(sc, AFI_INTR_MASK); reg |= AFI_INTR_MASK_MSI_MASK; AFI_WR4(sc, AFI_INTR_MASK, reg); out: return (rv); } #endif static int tegra_pcib_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "Nvidia Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int tegra_pcib_attach(device_t dev) { struct tegra_pcib_softc *sc; phandle_t node; int rv; int rid; struct tegra_pcib_port *port; int i; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF); node = ofw_bus_get_node(dev); sc->soc = (struct pcie_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; rv = tegra_pcib_parse_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot get FDT resources\n"); return (rv); } /* Allocate bus_space resources. */ rid = 0; sc->pads_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->pads_mem_res == NULL) { device_printf(dev, "Cannot allocate PADS register\n"); rv = ENXIO; goto out; } /* * XXX - FIXME * tag for config space is not filled when RF_ALLOCATED flag is used. 
*/ sc->bus_tag = rman_get_bustag(sc->pads_mem_res); rid = 1; sc->afi_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->afi_mem_res == NULL) { device_printf(dev, "Cannot allocate AFI register\n"); rv = ENXIO; goto out; } rid = 2; sc->cfg_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ALLOCATED); if (sc->cfg_mem_res == NULL) { device_printf(dev, "Cannot allocate config space memory\n"); rv = ENXIO; goto out; } sc->cfg_base_addr = rman_get_start(sc->cfg_mem_res); /* Map RP slots */ for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if (sc->ports[i] == NULL) continue; port = sc->ports[i]; rv = bus_space_map(sc->bus_tag, port->rp_base_addr, port->rp_size, 0, &port->cfg_handle); if (rv != 0) { device_printf(sc->dev, "Cannot allocate memory for " "port: %d\n", i); rv = ENXIO; goto out; } } /* * Get PCI interrupt */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); rv = ENXIO; goto out; } rid = 1; sc->msi_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate MSI IRQ resources\n"); rv = ENXIO; goto out; } sc->ofw_pci.sc_range_mask = 0x3; rv = ofw_pcib_init(dev); if (rv != 0) goto out; rv = tegra_pcib_decode_ranges(sc, sc->ofw_pci.sc_range, sc->ofw_pci.sc_nrange); if (rv != 0) goto out; if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, tegra_pci_intr, NULL, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); rv = ENXIO; goto out; } /* * Enable PCIE device. 
*/ rv = tegra_pcib_enable(sc); if (rv != 0) goto out; for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) { if (sc->ports[i] == NULL) continue; if (sc->ports[i]->enabled) tegra_pcib_port_enable(sc, i); else tegra_pcib_port_disable(sc, i); } #ifdef TEGRA_PCIB_MSI_ENABLE rv = tegra_pcib_attach_msi(dev); if (rv != 0) goto out; #endif device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); out: return (rv); } static device_method_t tegra_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_pcib_probe), DEVMETHOD(device_attach, tegra_pcib_attach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, tegra_pcib_maxslots), DEVMETHOD(pcib_read_config, tegra_pcib_read_config), DEVMETHOD(pcib_write_config, tegra_pcib_write_config), DEVMETHOD(pcib_route_interrupt, tegra_pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, tegra_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, tegra_pcib_release_msi), DEVMETHOD(pcib_map_msi, tegra_pcib_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), #ifdef TEGRA_PCIB_MSI_ENABLE /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, tegra_pcib_msi_alloc_msi), DEVMETHOD(msi_release_msi, tegra_pcib_msi_release_msi), DEVMETHOD(msi_map_msi, tegra_pcib_msi_map_msi), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, tegra_pcib_msi_disable_intr), DEVMETHOD(pic_enable_intr, tegra_pcib_msi_enable_intr), DEVMETHOD(pic_setup_intr, tegra_pcib_msi_setup_intr), DEVMETHOD(pic_teardown_intr, tegra_pcib_msi_teardown_intr), DEVMETHOD(pic_post_filter, tegra_pcib_msi_post_filter), DEVMETHOD(pic_post_ithread, tegra_pcib_msi_post_ithread), DEVMETHOD(pic_pre_ithread, tegra_pcib_msi_pre_ithread), #endif /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, 
ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, tegra_pcib_driver, tegra_pcib_methods, sizeof(struct tegra_pcib_softc), ofw_pcib_driver); DRIVER_MODULE(tegra_pcib, simplebus, tegra_pcib_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra_rtc.c b/sys/arm/nvidia/tegra_rtc.c index b489967c946e..07bf51628665 100644 --- a/sys/arm/nvidia/tegra_rtc.c +++ b/sys/arm/nvidia/tegra_rtc.c @@ -1,297 +1,297 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * RTC driver for Tegra SoCs. 
 * -------------------------------------------------------------------
 * Review notes (collapsed segment of sys/arm/nvidia/tegra_rtc.c):
 * contains the RTC register map (#defines; note the collapsed line
 * structure makes the directives run together here), locking macros,
 * compat_data for "nvidia,tegra124-rtc", the softc, and the driver
 * functions:
 *  - tegra_rtc_wait(): polls RTC_BUSY up to 500 us for the busy bit to
 *    clear, printing a warning on timeout ("timeouted" is a typo in the
 *    original message text; kept as-is since it is a runtime string);
 *  - tegra_rtc_gettime(): reads RTC_MILLI_SECONDS then
 *    RTC_SHADOW_SECONDS under the softc mutex and converts to a
 *    timespec; reading MILLI_SECONDS first appears to latch the shadow
 *    seconds register — TODO confirm against the TRM;
 *  - tegra_rtc_settime(): waits for not-busy, then writes RTC_SECONDS
 *    (sub-second part of the timespec is discarded);
 *  - tegra_rtc_intr(): acknowledges whatever is in RTC_INTR_STATUS by
 *    writing it back;
 *  - probe/attach/detach: standard newbus flow; attach allocates
 *    mem/irq resources, enables the OFW clock, clears alarm and
 *    interrupt state, installs the interrupt handler, and (per the
 *    XXXX note) does not yet register as a system time-of-day clock.
 * -------------------------------------------------------------------
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_if.h" #define RTC_CONTROL 0x00 #define RTC_BUSY 0x04 #define RTC_BUSY_STATUS (1 << 0) #define RTC_SECONDS 0x08 #define RTC_SHADOW_SECONDS 0x0c #define RTC_MILLI_SECONDS 0x10 #define RTC_SECONDS_ALARM0 0x14 #define RTC_SECONDS_ALARM1 0x18 #define RTC_MILLI_SECONDS_ALARM 0x1c #define RTC_SECONDS_COUNTDOWN_ALARM 0x20 #define RTC_MILLI_SECONDS_COUNTDOW_ALARM 0x24 #define RTC_INTR_MASK 0x28 #define RTC_INTR_MSEC_CDN_ALARM (1 << 4) #define RTC_INTR_SEC_CDN_ALARM (1 << 3) #define RTC_INTR_MSEC_ALARM (1 << 2) #define RTC_INTR_SEC_ALARM1 (1 << 1) #define RTC_INTR_SEC_ALARM0 (1 << 0) #define RTC_INTR_STATUS 0x2c #define RTC_INTR_SOURCE 0x30 #define RTC_INTR_SET 0x34 #define RTC_CORRECTION_FACTOR 0x38 #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "rtcwait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_rtc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-rtc", 1}, {NULL, 0} }; struct tegra_rtc_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; clk_t clk; uint32_t core_freq; }; static void tegra_rtc_wait(struct tegra_rtc_softc *sc) { int timeout; for (timeout = 500; timeout >0; timeout--) { if ((RD4(sc, RTC_BUSY) & RTC_BUSY_STATUS) == 0) break; DELAY(1); } if (timeout <= 0) device_printf(sc->dev, "Device busy timeouted\n"); } /* * Get the time of day clock and return it in ts. 
* Return 0 on success, an error number otherwise. */ static int tegra_rtc_gettime(device_t dev, struct timespec *ts) { struct tegra_rtc_softc *sc; struct timeval tv; uint32_t msec, sec; sc = device_get_softc(dev); LOCK(sc); msec = RD4(sc, RTC_MILLI_SECONDS); sec = RD4(sc, RTC_SHADOW_SECONDS); UNLOCK(sc); tv.tv_sec = sec; tv.tv_usec = msec * 1000; TIMEVAL_TO_TIMESPEC(&tv, ts); return (0); } static int tegra_rtc_settime(device_t dev, struct timespec *ts) { struct tegra_rtc_softc *sc; struct timeval tv; sc = device_get_softc(dev); LOCK(sc); TIMESPEC_TO_TIMEVAL(&tv, ts); tegra_rtc_wait(sc); WR4(sc, RTC_SECONDS, tv.tv_sec); UNLOCK(sc); return (0); } static void tegra_rtc_intr(void *arg) { struct tegra_rtc_softc *sc; uint32_t status; sc = (struct tegra_rtc_softc *)arg; LOCK(sc); status = RD4(sc, RTC_INTR_STATUS); WR4(sc, RTC_INTR_STATUS, status); UNLOCK(sc); } static int tegra_rtc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_rtc_attach(device_t dev) { int rv, rid; struct tegra_rtc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* OFW resources. */ rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. 
*/ WR4(sc, RTC_SECONDS_ALARM0, 0); WR4(sc, RTC_SECONDS_ALARM1, 0); WR4(sc, RTC_INTR_STATUS, 0xFFFFFFFF); WR4(sc, RTC_INTR_MASK, 0); /* Setup interrupt */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_rtc_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* * Register as a time of day clock with 1-second resolution. * * XXXX Not yet, we don't have support for multiple RTCs */ /* clock_register(dev, 1000000); */ return (bus_generic_attach(dev)); fail: if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_rtc_detach(device_t dev) { struct tegra_rtc_softc *sc; sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } static device_method_t tegra_rtc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_rtc_probe), DEVMETHOD(device_attach, tegra_rtc_attach), DEVMETHOD(device_detach, tegra_rtc_detach), /* clock interface */ DEVMETHOD(clock_gettime, tegra_rtc_gettime), DEVMETHOD(clock_settime, tegra_rtc_settime), DEVMETHOD_END }; static DEFINE_CLASS_0(rtc, tegra_rtc_driver, tegra_rtc_methods, sizeof(struct tegra_rtc_softc)); DRIVER_MODULE(tegra_rtc, simplebus, tegra_rtc_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra_sdhci.c b/sys/arm/nvidia/tegra_sdhci.c index b9b47f0d1c6f..6877b7021980 100644 --- a/sys/arm/nvidia/tegra_sdhci.c +++ b/sys/arm/nvidia/tegra_sdhci.c @@ -1,472 +1,472 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. 
 * -------------------------------------------------------------------
 * Review notes (collapsed segment of sys/arm/nvidia/tegra_sdhci.c):
 * BSD 2-clause license tail, includes, Tegra SDHOST vendor register
 * offsets, then the SDHCI glue driver:
 *  - compat_data accepts tegra124/tegra210 SDHCI nodes;
 *  - tegra_sdhci_read_{1,2,4}/write_{1,2,4} and the multi_4 variants are
 *    thin bus_space accessors for the SDHCI core; read_4 additionally
 *    forces SDHCI_CARD_PRESENT in SDHCI_PRESENT_STATE when the DT marks
 *    the slot "non-removable" (sc->force_card_present);
 *  - tegra_sdhci_intr(): forwards to sdhci_generic_intr() and reads
 *    SDHCI_INT_STATUS afterwards (read-back, presumably to flush the
 *    posted write — TODO confirm);
 *  - get_ro / get_card_present delegate to the sdhci_fdt_gpio helpers;
 *  - probe: matches compat_data and lets the DT override quirks and
 *    max-frequency;
 *  - attach: allocates mem/irq resources, asserts the 'sdhci' hwreset,
 *    optionally takes a power GPIO and assigned clocks, enables the
 *    module clock at 48 MHz (round-down), deasserts reset after a 4 ms
 *    delay, derives slot caps (bus-width from DT, clock field cleared so
 *    the SDHCI layer uses the measured frequency), installs the
 *    interrupt handler, and initializes/starts the single slot;
 *  - detach: tears everything down in reverse.
 * The "-#include +#include" fragment mid-line is diff residue from the
 * source this chunk was extracted from.
 * -------------------------------------------------------------------
 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * SDHCI driver glue for NVIDIA Tegra family * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include "opt_mmccam.h" /* Tegra SDHOST controller vendor register definitions */ #define SDMMC_VENDOR_CLOCK_CNTRL 0x100 #define VENDOR_CLOCK_CNTRL_CLK_SHIFT 8 #define VENDOR_CLOCK_CNTRL_CLK_MASK 0xFF #define SDMMC_VENDOR_SYS_SW_CNTRL 0x104 #define SDMMC_VENDOR_CAP_OVERRIDES 0x10C #define SDMMC_VENDOR_BOOT_CNTRL 0x110 #define SDMMC_VENDOR_BOOT_ACK_TIMEOUT 0x114 #define SDMMC_VENDOR_BOOT_DAT_TIMEOUT 0x118 #define SDMMC_VENDOR_DEBOUNCE_COUNT 0x11C #define SDMMC_VENDOR_MISC_CNTRL 0x120 #define VENDOR_MISC_CTRL_ENABLE_SDR104 0x8 #define VENDOR_MISC_CTRL_ENABLE_SDR50 0x10 #define VENDOR_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 #define VENDOR_MISC_CTRL_ENABLE_DDR50 0x200 #define SDMMC_MAX_CURRENT_OVERRIDE 0x124 #define SDMMC_MAX_CURRENT_OVERRIDE_HI 0x128 #define SDMMC_VENDOR_CLK_GATE_HYSTERESIS_COUNT 0x1D0 #define SDMMC_VENDOR_PHWRESET_VAL0 0x1D4 #define SDMMC_VENDOR_PHWRESET_VAL1 0x1D8 #define SDMMC_VENDOR_PHWRESET_VAL2 0x1DC #define SDMMC_SDMEMCOMPPADCTRL_0 0x1E0 #define SDMMC_AUTO_CAL_CONFIG 0x1E4 #define SDMMC_AUTO_CAL_INTERVAL 0x1E8 #define SDMMC_AUTO_CAL_STATUS 0x1EC #define SDMMC_SDMMC_MCCIF_FIFOCTRL 0x1F4 #define SDMMC_TIMEOUT_WCOAL_SDMMC 0x1F8 /* Compatible devices. 
*/ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-sdhci", 1}, {"nvidia,tegra210-sdhci", 1}, {NULL, 0}, }; struct tegra_sdhci_softc { device_t dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; u_int quirks; /* Chip specific quirks */ u_int caps; /* If we override SDHCI_CAPABILITIES */ uint32_t max_clk; /* Max possible freq */ clk_t clk; hwreset_t reset; gpio_pin_t gpio_power; struct sdhci_fdt_gpio *gpio; int force_card_present; struct sdhci_slot slot; }; static inline uint32_t RD4(struct tegra_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static uint8_t tegra_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_1(sc->mem_res, off)); } static uint16_t tegra_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_2(sc->mem_res, off)); } static uint32_t tegra_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; uint32_t val32; sc = device_get_softc(dev); val32 = bus_read_4(sc->mem_res, off); /* Force the card-present state if necessary. 
*/ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void tegra_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_1(sc->mem_res, off, val); } static void tegra_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_2(sc->mem_res, off, val); } static void tegra_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->mem_res, off, val); } static void tegra_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_intr(void *arg) { struct tegra_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); RD4(sc, SDHCI_INT_STATUS); } static int tegra_sdhci_get_ro(device_t brdev, device_t reqdev) { struct tegra_sdhci_softc *sc = device_get_softc(brdev); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool tegra_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct tegra_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } static int tegra_sdhci_probe(device_t dev) { struct tegra_sdhci_softc *sc; phandle_t node; pcell_t cid; const struct ofw_compat_data *cd; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); cd = ofw_bus_search_compatible(dev, compat_data); if (cd->ocd_data == 0) return (ENXIO); node = 
ofw_bus_get_node(dev); device_set_desc(dev, "Tegra SDHCI controller"); /* Allow dts to patch quirks, slots, and max-frequency. */ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0) sc->quirks = cid; if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0) sc->max_clk = cid; return (BUS_PROBE_DEFAULT); } static int tegra_sdhci_attach(device_t dev) { struct tegra_sdhci_softc *sc; int rid, rv; uint64_t freq; phandle_t node, prop; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); rv = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); rv = ENXIO; goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "sdhci", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sdhci' reset\n"); goto fail; } rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot reset 'sdhci' reset\n"); goto fail; } gpio_pin_get_by_ofw_property(sc->dev, node, "power-gpios", &sc->gpio_power); if (OF_hasprop(node, "assigned-clocks")) { rv = clk_set_assigned(sc->dev, node); if (rv != 0) { device_printf(dev, "Cannot set assigned clocks\n"); goto fail; } } rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get clock\n"); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock\n"); goto fail; } rv = clk_set_freq(sc->clk, 48000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock\n"); } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } DELAY(4000); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot unreset 'sdhci' reset\n"); goto fail; } if (bootverbose) device_printf(dev, " 
Base MMC clock: %jd\n", (uintmax_t)freq); /* Fill slot information. */ sc->max_clk = (int)freq; sc->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_MISSING_CAPS; /* Limit real slot capabilities. */ sc->caps = RD4(sc, SDHCI_CAPABILITIES); if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } if (OF_hasprop(node, "non-removable")) sc->force_card_present = 1; /* * Clear clock field, so SDHCI driver uses supplied frequency. * in sc->slot.max_clk */ sc->caps &= ~SDHCI_CLOCK_V3_BASE_MASK; sc->slot.quirks = sc->quirks; sc->slot.max_clk = sc->max_clk; sc->slot.caps = sc->caps; if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, tegra_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); rv = ENXIO; goto fail; } rv = sdhci_init_slot(dev, &sc->slot, 0); if (rv != 0) { goto fail; } sc->gpio = sdhci_fdt_gpio_setup(sc->dev, &sc->slot); bus_generic_probe(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: if (sc->gpio != NULL) sdhci_fdt_gpio_teardown(sc->gpio); if (sc->intr_cookie != NULL) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->gpio_power != NULL) gpio_pin_release(sc->gpio_power); if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (rv); } static int tegra_sdhci_detach(device_t dev) { struct tegra_sdhci_softc *sc = device_get_softc(dev); struct sdhci_slot *slot = &sc->slot; bus_generic_detach(dev); sdhci_fdt_gpio_teardown(sc->gpio); 
clk_release(sc->clk); bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sdhci_cleanup_slot(slot); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); return (0); } static device_method_t tegra_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_sdhci_probe), DEVMETHOD(device_attach, tegra_sdhci_attach), DEVMETHOD(device_detach, tegra_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, tegra_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, tegra_sdhci_read_1), DEVMETHOD(sdhci_read_2, tegra_sdhci_read_2), DEVMETHOD(sdhci_read_4, tegra_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, tegra_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, tegra_sdhci_write_1), DEVMETHOD(sdhci_write_2, tegra_sdhci_write_2), DEVMETHOD(sdhci_write_4, tegra_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, tegra_sdhci_write_multi_4), DEVMETHOD(sdhci_get_card_present, tegra_sdhci_get_card_present), DEVMETHOD_END }; static DEFINE_CLASS_0(sdhci, tegra_sdhci_driver, tegra_sdhci_methods, sizeof(struct tegra_sdhci_softc)); DRIVER_MODULE(sdhci_tegra, simplebus, tegra_sdhci_driver, NULL, NULL); SDHCI_DEPEND(sdhci_tegra); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci); #endif diff --git a/sys/arm/nvidia/tegra_soctherm.c b/sys/arm/nvidia/tegra_soctherm.c index 2cd0400b5c95..0018833e5a43 100644 --- a/sys/arm/nvidia/tegra_soctherm.c +++ b/sys/arm/nvidia/tegra_soctherm.c @@ -1,841 +1,841 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. 
 * -------------------------------------------------------------------
 * Review notes (collapsed segment of sys/arm/nvidia/tegra_soctherm.c):
 * BSD 2-clause license tail, includes, then the SOC_THERM thermometer
 * driver preamble: per-sensor register layout (TSENSOR_CONFIG0..2,
 * STATUS0..2), readback decode helpers, global registers, and the fuse
 * field layouts — which, per the top comment, differ between Tegra124
 * and Tegra210 (calibration data/algorithm taken from Linux because
 * this block is undocumented in the TRM).  Declares tsensor_cfg /
 * soctherm_shared_cal / tsensor / soctherm_softc / soctherm_soc
 * structures and begins the t124_tsensor_config initializer, which is
 * truncated at the edge of this chunk (its tail lies outside the view
 * and is intentionally left untouched).  NOTE(review): the collapsed
 * line structure makes each physical line's trailing #defines run into
 * a single directive — restore original line breaks before building.
 * -------------------------------------------------------------------
 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Thermometer and thermal zones driver for Tegra SoCs. * Calibration data and algo are taken from Linux, because this part of SoC * is undocumented in TRM. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "tegra_soctherm_if.h" /* Per sensors registers - base is 0x0c0*/ #define TSENSOR_CONFIG0 0x000 #define TSENSOR_CONFIG0_TALL(x) (((x) & 0xFFFFF) << 8) #define TSENSOR_CONFIG0_STATUS_CLR (1 << 5) #define TSENSOR_CONFIG0_TCALC_OVERFLOW (1 << 4) #define TSENSOR_CONFIG0_OVERFLOW (1 << 3) #define TSENSOR_CONFIG0_CPTR_OVERFLOW (1 << 2) #define TSENSOR_CONFIG0_RO_SEL (1 << 1) #define TSENSOR_CONFIG0_STOP (1 << 0) #define TSENSOR_CONFIG1 0x004 #define TSENSOR_CONFIG1_TEMP_ENABLE (1U << 31) #define TSENSOR_CONFIG1_TEN_COUNT(x) (((x) & 0x3F) << 24) #define TSENSOR_CONFIG1_TIDDQ_EN(x) (((x) & 0x3F) << 15) #define TSENSOR_CONFIG1_TSAMPLE(x) (((x) & 0x3FF) << 0) #define TSENSOR_CONFIG2 0x008 #define TSENSOR_CONFIG2_THERMA(x) (((x) & 0xFFFF) << 16) #define TSENSOR_CONFIG2_THERMB(x) (((x) & 0xFFFF) << 0) #define TSENSOR_STATUS0 0x00c #define TSENSOR_STATUS0_CAPTURE_VALID (1U << 31) #define TSENSOR_STATUS0_CAPTURE(x) (((x) >> 0) & 0xffff) #define TSENSOR_STATUS1 0x010 #define TSENSOR_STATUS1_TEMP_VALID (1U << 31) #define TSENSOR_STATUS1_TEMP(x) (((x) >> 0) & 0xffff) #define TSENSOR_STATUS2 0x014 #define TSENSOR_STATUS2_TEMP_MAX(x) (((x) >> 16) & 0xffff) #define TSENSOR_STATUS2_TEMP_MIN(x) (((x) >> 0) & 0xffff) /* Readbacks */ #define READBACK_VALUE(x) (((x) >> 8) & 0xff) #define READBACK_ADD_HALF (1 << 7) #define READBACK_NEGATE (1 << 0) /* Global registers */ #define TSENSOR_PDIV 0x1c0 #define TSENSOR_HOTSPOT_OFF 0x1c4 #define TSENSOR_TEMP1 0x1c8 #define TSENSOR_TEMP2 0x1cc /* Fuses */ #define FUSE_TSENSOR_CALIB_CP_TS_BASE_SHIFT 0 #define FUSE_TSENSOR_CALIB_CP_TS_BASE_BITS 13 #define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13 #define FUSE_TSENSOR_CALIB_FT_TS_BASE_BITS 13 /* Layout is different for Tegra124 and Tegra210 */ #define FUSE_TSENSOR_COMMON 0x180 #define TEGRA124_FUSE_COMMON_CP_TS_BASE(x) (((x) >> 0) & 0x3ff) 
#define TEGRA124_FUSE_COMMON_FT_TS_BASE(x) (((x) >> 10) & 0x7ff) #define TEGRA124_FUSE_COMMON_SHIFT_FT_SHIFT 21 #define TEGRA124_FUSE_COMMON_SHIFT_FT_BITS 5 #define TEGRA210_FUSE_COMMON_CP_TS_BASE(x) (((x) >> 11) & 0x3ff) #define TEGRA210_FUSE_COMMON_FT_TS_BASE(x) (((x) >> 21) & 0x7ff) #define TEGRA210_FUSE_COMMON_SHIFT_CP_SHIFT 0 #define TEGRA210_FUSE_COMMON_SHIFT_CP_BITS 6 #define TEGRA210_FUSE_COMMON_SHIFT_FT_SHIFT 6 #define TEGRA210_FUSE_COMMON_SHIFT_FT_BITS 5 /* Only for Tegra124 */ #define FUSE_SPARE_REALIGNMENT_REG 0x1fc #define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_SHIFT 0 #define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_BITS 6 #define TEGRA124_NOMINAL_CALIB_FT 105 #define TEGRA124_NOMINAL_CALIB_CP 25 #define TEGRA210_NOMINAL_CALIB_FT 105 #define TEGRA210_NOMINAL_CALIB_CP 25 #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) static struct sysctl_ctx_list soctherm_sysctl_ctx; struct tsensor_cfg { uint32_t tall; uint32_t tsample; uint32_t tiddq_en; uint32_t ten_count; uint32_t pdiv; uint32_t tsample_ate; uint32_t pdiv_ate; }; struct soctherm_shared_cal { uint32_t base_cp; uint32_t base_ft; int32_t actual_temp_cp; int32_t actual_temp_ft; }; struct tsensor { char *name; int id; bus_addr_t sensor_base; bus_addr_t calib_fuse; int fuse_corr_alpha; int fuse_corr_beta; int16_t therm_a; int16_t therm_b; }; struct soctherm_soc; struct soctherm_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; clk_t tsensor_clk; clk_t soctherm_clk; hwreset_t reset; struct soctherm_soc *soc; struct soctherm_shared_cal shared_cal; }; struct soctherm_soc { void (*shared_cal)(struct soctherm_softc *sc); uint32_t tsensor_pdiv; uint32_t tsensor_hotspot_off; struct tsensor_cfg *tsensor_cfg; struct tsensor *tsensors; int ntsensors; }; /* Tegra124 config */ static struct tsensor_cfg t124_tsensor_config = { .tall = 16300, .tsample = 120, .tiddq_en = 1, .ten_count = 1, .pdiv = 8, .tsample_ate = 480, 
.pdiv_ate = 8 }; static struct tsensor t124_tsensors[] = { { .name = "cpu0", .id = TEGRA124_SOCTHERM_SENSOR_CPU, .sensor_base = 0x0c0, .calib_fuse = 0x098, .fuse_corr_alpha = 1135400, .fuse_corr_beta = -6266900, }, { .name = "cpu1", .id = -1, .sensor_base = 0x0e0, .calib_fuse = 0x084, .fuse_corr_alpha = 1122220, .fuse_corr_beta = -5700700, }, { .name = "cpu2", .id = -1, .sensor_base = 0x100, .calib_fuse = 0x088, .fuse_corr_alpha = 1127000, .fuse_corr_beta = -6768200, }, { .name = "cpu3", .id = -1, .sensor_base = 0x120, .calib_fuse = 0x12c, .fuse_corr_alpha = 1110900, .fuse_corr_beta = -6232000, }, { .name = "mem0", .id = TEGRA124_SOCTHERM_SENSOR_MEM, .sensor_base = 0x140, .calib_fuse = 0x158, .fuse_corr_alpha = 1122300, .fuse_corr_beta = -5936400, }, { .name = "mem1", .id = -1, .sensor_base = 0x160, .calib_fuse = 0x15c, .fuse_corr_alpha = 1145700, .fuse_corr_beta = -7124600, }, { .name = "gpu", .id = TEGRA124_SOCTHERM_SENSOR_GPU, .sensor_base = 0x180, .calib_fuse = 0x154, .fuse_corr_alpha = 1120100, .fuse_corr_beta = -6000500, }, { .name = "pllX", .id = TEGRA124_SOCTHERM_SENSOR_PLLX, .sensor_base = 0x1a0, .calib_fuse = 0x160, .fuse_corr_alpha = 1106500, .fuse_corr_beta = -6729300, }, }; static void tegra124_shared_cal(struct soctherm_softc *sc); static struct soctherm_soc tegra124_soc = { .shared_cal = tegra124_shared_cal, .tsensor_pdiv = 0x8888, .tsensor_hotspot_off = 0x00060600 , .tsensor_cfg = &t124_tsensor_config, .tsensors = t124_tsensors, .ntsensors = nitems(t124_tsensors), }; /* Tegra210 config */ static struct tsensor_cfg t210_tsensor_config = { .tall = 16300, .tsample = 120, .tiddq_en = 1, .ten_count = 1, .pdiv = 8, .tsample_ate = 480, .pdiv_ate = 8 }; static struct tsensor t210_tsensors[] = { { .name = "cpu0", .id = TEGRA124_SOCTHERM_SENSOR_CPU, .sensor_base = 0x0c0, .calib_fuse = 0x098, .fuse_corr_alpha = 1085000, .fuse_corr_beta = 3244200, }, { .name = "cpu1", .id = -1, .sensor_base = 0x0e0, .calib_fuse = 0x084, .fuse_corr_alpha = 1126200, 
.fuse_corr_beta = -67500, }, { .name = "cpu2", .id = -1, .sensor_base = 0x100, .calib_fuse = 0x088, .fuse_corr_alpha = 1098400, .fuse_corr_beta = 2251100, }, { .name = "cpu3", .id = -1, .sensor_base = 0x120, .calib_fuse = 0x12c, .fuse_corr_alpha = 1108000, .fuse_corr_beta = 602700, }, { .name = "mem0", .id = TEGRA124_SOCTHERM_SENSOR_MEM, .sensor_base = 0x140, .calib_fuse = 0x158, .fuse_corr_alpha = 1069200, .fuse_corr_beta = 3549900, }, { .name = "mem1", .id = -1, .sensor_base = 0x160, .calib_fuse = 0x15c, .fuse_corr_alpha = 1173700, .fuse_corr_beta = -6263600, }, { .name = "gpu", .id = TEGRA124_SOCTHERM_SENSOR_GPU, .sensor_base = 0x180, .calib_fuse = 0x154, .fuse_corr_alpha = 1074300, .fuse_corr_beta = 2734900, }, { .name = "pllX", .id = TEGRA124_SOCTHERM_SENSOR_PLLX, .sensor_base = 0x1a0, .calib_fuse = 0x160, .fuse_corr_alpha = 1039700, .fuse_corr_beta = 6829100, }, }; static void tegra210_shared_cal(struct soctherm_softc *sc); static struct soctherm_soc tegra210_soc = { .shared_cal = tegra210_shared_cal, .tsensor_pdiv = 0x8888, .tsensor_hotspot_off = 0x000A0500 , .tsensor_cfg = &t210_tsensor_config, .tsensors = t210_tsensors, .ntsensors = nitems(t210_tsensors), }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-soctherm", (uintptr_t)&tegra124_soc}, {"nvidia,tegra210-soctherm", (uintptr_t)&tegra210_soc}, {NULL, 0}, }; /* Extract signed integer bitfield from register */ static int extract_signed(uint32_t reg, int shift, int bits) { int32_t val; uint32_t mask; mask = (1 << bits) - 1; val = ((reg >> shift) & mask) << (32 - bits); val >>= 32 - bits; return ((int32_t)val); } static inline int64_t div64_s64_precise(int64_t a, int64_t b) { int64_t r, al; al = a << 16; r = (al * 2 + 1) / (2 * b); return (r >> 16); } static void tegra124_shared_cal(struct soctherm_softc *sc) { uint32_t val; int calib_cp, calib_ft; struct soctherm_shared_cal *cal; cal = &sc->shared_cal; val = tegra_fuse_read_4(FUSE_TSENSOR_COMMON); cal->base_cp = 
TEGRA124_FUSE_COMMON_CP_TS_BASE(val); cal->base_ft = TEGRA124_FUSE_COMMON_FT_TS_BASE(val); calib_ft = extract_signed(val, TEGRA124_FUSE_COMMON_SHIFT_FT_SHIFT, TEGRA124_FUSE_COMMON_SHIFT_FT_BITS); val = tegra_fuse_read_4(FUSE_SPARE_REALIGNMENT_REG); calib_cp = extract_signed(val, FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_SHIFT, FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_BITS); cal->actual_temp_cp = 2 * TEGRA124_NOMINAL_CALIB_CP + calib_cp; cal->actual_temp_ft = 2 * TEGRA124_NOMINAL_CALIB_FT + calib_ft; #ifdef DEBUG printf("%s: base_cp: %u, base_ft: %d," " actual_temp_cp: %d, actual_temp_ft: %d\n", __func__, cal->base_cp, cal->base_ft, cal->actual_temp_cp, cal->actual_temp_ft); #endif } static void tegra210_shared_cal(struct soctherm_softc *sc) { uint32_t val; int calib_cp, calib_ft; struct soctherm_shared_cal *cal; cal = &sc->shared_cal; val = tegra_fuse_read_4(FUSE_TSENSOR_COMMON); cal->base_cp = TEGRA210_FUSE_COMMON_CP_TS_BASE(val); cal->base_ft = TEGRA210_FUSE_COMMON_FT_TS_BASE(val); calib_ft = extract_signed(val, TEGRA210_FUSE_COMMON_SHIFT_FT_SHIFT, TEGRA210_FUSE_COMMON_SHIFT_FT_BITS); calib_cp = extract_signed(val, TEGRA210_FUSE_COMMON_SHIFT_CP_SHIFT, TEGRA210_FUSE_COMMON_SHIFT_CP_BITS); cal->actual_temp_cp = 2 * TEGRA210_NOMINAL_CALIB_CP + calib_cp; cal->actual_temp_ft = 2 * TEGRA210_NOMINAL_CALIB_FT + calib_ft; #ifdef DEBUG printf("%s: base_cp: %u, base_ft: %d," " actual_temp_cp: %d, actual_temp_ft: %d\n", __func__, cal->base_cp, cal->base_ft, cal->actual_temp_cp, cal->actual_temp_ft); #endif } static void tsensor_calibration(struct soctherm_softc *sc, struct tsensor *sensor) { uint32_t val; int mult, div, calib_cp, calib_ft; int actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp; int temp_a, temp_b; struct tsensor_cfg *cfg; struct soctherm_shared_cal *cal; int64_t tmp; cfg = sc->soc->tsensor_cfg; cal = &sc->shared_cal; val = tegra_fuse_read_4(sensor->calib_fuse); calib_cp = extract_signed(val, FUSE_TSENSOR_CALIB_CP_TS_BASE_SHIFT, 
FUSE_TSENSOR_CALIB_CP_TS_BASE_BITS); actual_tsensor_cp = cal->base_cp * 64 + calib_cp; calib_ft = extract_signed(val, FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT, FUSE_TSENSOR_CALIB_FT_TS_BASE_BITS); actual_tsensor_ft = cal->base_ft * 32 + calib_ft; delta_sens = actual_tsensor_ft - actual_tsensor_cp; delta_temp = cal->actual_temp_ft - cal->actual_temp_cp; mult = cfg->pdiv * cfg->tsample_ate; div = cfg->tsample * cfg->pdiv_ate; temp_a = div64_s64_precise((int64_t) delta_temp * (1LL << 13) * mult, (int64_t) delta_sens * div); tmp = (int64_t)actual_tsensor_ft * cal->actual_temp_cp - (int64_t)actual_tsensor_cp * cal->actual_temp_ft; temp_b = div64_s64_precise(tmp, (int64_t)delta_sens); temp_a = div64_s64_precise((int64_t)temp_a * sensor->fuse_corr_alpha, 1000000); temp_b = div64_s64_precise((int64_t)temp_b * sensor->fuse_corr_alpha + sensor->fuse_corr_beta, 1000000); sensor->therm_a = (int16_t)temp_a; sensor->therm_b = (int16_t)temp_b; #ifdef DEBUG printf("%s: sensor %s fuse: 0x%08X (0x%04X, 0x%04X)" " calib_cp: %d(0x%04X), calib_ft: %d(0x%04X)\n", __func__, sensor->name, val, val & 0x1FFF, (val >> 13) & 0x1FFF, calib_cp, calib_cp, calib_ft, calib_ft); printf("therma: 0x%04X(%d), thermb: 0x%04X(%d)\n", (uint16_t)sensor->therm_a, sensor->therm_a, (uint16_t)sensor->therm_b, sensor->therm_b); #endif } static void soctherm_init_tsensor(struct soctherm_softc *sc, struct tsensor *sensor) { struct tsensor_cfg *cfg; uint32_t val; cfg = sc->soc->tsensor_cfg; tsensor_calibration(sc, sensor); val = RD4(sc, sensor->sensor_base + TSENSOR_CONFIG0); val |= TSENSOR_CONFIG0_STOP; val |= TSENSOR_CONFIG0_STATUS_CLR; WR4(sc, sensor->sensor_base + TSENSOR_CONFIG0, val); val = TSENSOR_CONFIG0_TALL(cfg->tall); val |= TSENSOR_CONFIG0_STOP; WR4(sc, sensor->sensor_base + TSENSOR_CONFIG0, val); val = TSENSOR_CONFIG1_TSAMPLE(cfg->tsample - 1); val |= TSENSOR_CONFIG1_TIDDQ_EN(cfg->tiddq_en); val |= TSENSOR_CONFIG1_TEN_COUNT(cfg->ten_count); val |= TSENSOR_CONFIG1_TEMP_ENABLE; WR4(sc, sensor->sensor_base + 
TSENSOR_CONFIG1, val);

	/* Program the fuse-calibrated linearization coefficients. */
	val = TSENSOR_CONFIG2_THERMA((uint16_t)sensor->therm_a) |
	    TSENSOR_CONFIG2_THERMB((uint16_t)sensor->therm_b);
	WR4(sc, sensor->sensor_base + TSENSOR_CONFIG2, val);

	/* Start the sensor by clearing its STOP bit. */
	val = RD4(sc, sensor->sensor_base + TSENSOR_CONFIG0);
	val &= ~TSENSOR_CONFIG0_STOP;
	WR4(sc, sensor->sensor_base + TSENSOR_CONFIG0, val);
#ifdef DEBUG
	printf(" Sensor: %s cfg:0x%08X, 0x%08X, 0x%08X,"
	    " sts:0x%08X, 0x%08X, 0x%08X\n", sensor->name,
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG0),
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG1),
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG2),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS0),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS1),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS2)
	    );
#endif
}

/*
 * Decode a raw readback register value into a signed temperature.
 * Integer part is scaled by 1000 and the ADD_HALF bit contributes 500,
 * so the result is in milli-degrees (Celsius, given the NEGATE bit).
 */
static int
soctherm_convert_raw(uint32_t val)
{
	int32_t t;

	t = READBACK_VALUE(val) * 1000;
	if (val & READBACK_ADD_HALF)
		t += 500;
	if (val & READBACK_NEGATE)
		t *= -1;
	return (t);
}

/*
 * Read one sensor, polling up to 100 x 100us for a valid sample.
 * NOTE(review): on timeout only a warning is printed; the last (stale or
 * invalid) STATUS1 value is still converted and 0 is returned — callers
 * cannot distinguish this from success.  Confirm whether returning an
 * error here would be safer.
 */
static int
soctherm_read_temp(struct soctherm_softc *sc, struct tsensor *sensor,
    int *temp)
{
	int timeout;
	uint32_t val;

	/* wait for valid sample */
	for (timeout = 100; timeout > 0; timeout--) {
		val = RD4(sc, sensor->sensor_base + TSENSOR_STATUS1);
		if ((val & TSENSOR_STATUS1_TEMP_VALID) != 0)
			break;
		DELAY(100);
	}
	if (timeout <= 0)
		device_printf(sc->dev, "Sensor %s timeouted\n", sensor->name);
	*temp = soctherm_convert_raw(val);
#ifdef DEBUG
	printf("%s: Raw: 0x%08X, temp: %d\n", __func__, val, *temp);
	printf(" Sensor: %s cfg:0x%08X, 0x%08X, 0x%08X,"
	    " sts:0x%08X, 0x%08X, 0x%08X\n", sensor->name,
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG0),
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG1),
	    RD4(sc, sensor->sensor_base + TSENSOR_CONFIG2),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS0),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS1),
	    RD4(sc, sensor->sensor_base + TSENSOR_STATUS2)
	    );
#endif
	return (0);
}

/* TEGRA_SOCTHERM_GET_TEMPERATURE method: resolve sensor id and read it. */
static int
soctherm_get_temp(device_t dev, device_t cdev, uintptr_t id, int *val)
{
	struct soctherm_softc *sc;
	int i;

	sc = device_get_softc(dev);
	/* The direct sensor map starts
at 0x100 */ if (id >= 0x100) { id -= 0x100; if (id >= sc->soc->ntsensors) return (ERANGE); return(soctherm_read_temp(sc, sc->soc->tsensors + id, val)); } /* Linux (DT) compatible thermal zones */ for (i = 0; i < sc->soc->ntsensors; i++) { if (sc->soc->tsensors->id == id) { return(soctherm_read_temp(sc, sc->soc->tsensors + id, val)); } } return (ERANGE); } static int soctherm_sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct soctherm_softc *sc; int val; int rv; int id; /* Write request */ if (req->newptr != NULL) return (EINVAL); sc = arg1; id = arg2; if (id >= sc->soc->ntsensors) return (ERANGE); rv = soctherm_read_temp(sc, sc->soc->tsensors + id, &val); if (rv != 0) return (rv); val = val / 100; val += 2731; rv = sysctl_handle_int(oidp, &val, 0, req); return (rv); } static int soctherm_init_sysctl(struct soctherm_softc *sc) { int i; struct sysctl_oid *oid, *tmp; sysctl_ctx_init(&soctherm_sysctl_ctx); /* create node for hw.temp */ oid = SYSCTL_ADD_NODE(&soctherm_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "temperature", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); if (oid == NULL) return (ENXIO); /* Add sensors */ for (i = sc->soc->ntsensors - 1; i >= 0; i--) { tmp = SYSCTL_ADD_PROC(&soctherm_sysctl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, sc->soc->tsensors[i].name, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, soctherm_sysctl_temperature, "IK", "SoC Temperature"); if (tmp == NULL) return (ENXIO); } return (0); } static int soctherm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra temperature sensors"); return (BUS_PROBE_DEFAULT); } static int soctherm_attach(device_t dev) { struct soctherm_softc *sc; phandle_t node; int i, rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->soc = (struct soctherm_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; node = ofw_bus_get_node(sc->dev); rid = 0; sc->mem_res = 
	    bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot allocate memory resources\n");
		goto fail;
	}

	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "Cannot allocate IRQ resources\n");
		goto fail;
	}
	/* Interrupt handler is not wired up yet. */
	/*
	if ((bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC,
	    soctherm_intr, NULL, sc, &sc->irq_ih))) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		goto fail;
	}
	*/

	/* OWF resources */
	rv = hwreset_get_by_ofw_name(dev, 0, "soctherm", &sc->reset);
	if (rv != 0) {
		device_printf(dev, "Cannot get fuse reset\n");
		goto fail;
	}
	rv = clk_get_by_ofw_name(dev, 0, "tsensor", &sc->tsensor_clk);
	if (rv != 0) {
		device_printf(dev, "Cannot get 'tsensor' clock: %d\n", rv);
		goto fail;
	}
	rv = clk_get_by_ofw_name(dev, 0, "soctherm", &sc->soctherm_clk);
	if (rv != 0) {
		device_printf(dev, "Cannot get 'soctherm' clock: %d\n", rv);
		goto fail;
	}

	/* Reset is held asserted across clock enable, then released. */
	rv = hwreset_assert(sc->reset);
	if (rv != 0) {
		device_printf(dev, "Cannot assert reset\n");
		goto fail;
	}
	rv = clk_enable(sc->tsensor_clk);
	if (rv != 0) {
		device_printf(dev, "Cannot enable 'tsensor' clock: %d\n", rv);
		goto fail;
	}
	rv = clk_enable(sc->soctherm_clk);
	if (rv != 0) {
		device_printf(dev, "Cannot enable 'soctherm' clock: %d\n", rv);
		goto fail;
	}
	rv = hwreset_deassert(sc->reset);
	if (rv != 0) {
		device_printf(dev, "Cannot clear reset\n");
		goto fail;
	}

	/* Read shared fuse calibration, then program and start each sensor. */
	sc->soc->shared_cal(sc);

	WR4(sc, TSENSOR_PDIV, sc->soc->tsensor_pdiv);
	WR4(sc, TSENSOR_HOTSPOT_OFF, sc->soc->tsensor_hotspot_off);

	for (i = 0; i < sc->soc->ntsensors; i++)
		soctherm_init_tsensor(sc, sc->soc->tsensors + i);

	rv = soctherm_init_sysctl(sc);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot initialize sysctls\n");
		goto fail;
	}

	OF_device_register_xref(OF_xref_from_node(node), dev);
	return (bus_generic_attach(dev));

fail:
	if (sc->irq_ih != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_ih);
	/*
	 * NOTE(review): soctherm_sysctl_ctx is only initialized inside
	 * soctherm_init_sysctl(); if attach fails before that call this
	 * frees an uninitialized context — verify this is safe.
	 */
	sysctl_ctx_free(&soctherm_sysctl_ctx);
	if
(sc->tsensor_clk != NULL) clk_release(sc->tsensor_clk); if (sc->soctherm_clk != NULL) clk_release(sc->soctherm_clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int soctherm_detach(device_t dev) { struct soctherm_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sysctl_ctx_free(&soctherm_sysctl_ctx); if (sc->tsensor_clk != NULL) clk_release(sc->tsensor_clk); if (sc->soctherm_clk != NULL) clk_release(sc->soctherm_clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static device_method_t tegra_soctherm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, soctherm_probe), DEVMETHOD(device_attach, soctherm_attach), DEVMETHOD(device_detach, soctherm_detach), /* SOCTHERM interface */ DEVMETHOD(tegra_soctherm_get_temperature, soctherm_get_temp), DEVMETHOD_END }; static DEFINE_CLASS_0(soctherm, tegra_soctherm_driver, tegra_soctherm_methods, sizeof(struct soctherm_softc)); EARLY_DRIVER_MODULE(tegra_soctherm, simplebus, tegra_soctherm_driver, NULL, NULL, 79); diff --git a/sys/arm/nvidia/tegra_uart.c b/sys/arm/nvidia/tegra_uart.c index 05d1c46407f7..31f92d34a6d3 100644 --- a/sys/arm/nvidia/tegra_uart.c +++ b/sys/arm/nvidia/tegra_uart.c @@ -1,249 +1,249 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * UART driver for Tegra SoCs. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "uart_if.h" /* * High-level UART interface. */ struct tegra_softc { struct ns8250_softc ns8250_base; clk_t clk; hwreset_t reset; }; /* * UART class interface. 
*/ static int tegra_uart_attach(struct uart_softc *sc) { int rv; struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas = &sc->sc_bas; rv = ns8250_bus_attach(sc); if (rv != 0) return (rv); ns8250->ier_rxbits = 0x1d; ns8250->ier_mask = 0xc0; ns8250->ier = uart_getreg(bas, REG_IER) & ns8250->ier_mask; ns8250->ier |= ns8250->ier_rxbits; uart_setreg(bas, REG_IER, ns8250->ier); uart_barrier(bas); return (0); } static void tegra_uart_grab(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; u_char ier; /* * turn off all interrupts to enter polling mode. Leave the * saved mask alone. We'll restore whatever it was in ungrab. * All pending interrupt signals are reset when IER is set to 0. */ uart_lock(sc->sc_hwmtx); ier = uart_getreg(bas, REG_IER); uart_setreg(bas, REG_IER, ier & ns8250->ier_mask); while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0) ; uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } static void tegra_uart_ungrab(struct uart_softc *sc) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas = &sc->sc_bas; /* * Restore previous interrupt mask */ uart_lock(sc->sc_hwmtx); uart_setreg(bas, REG_FCR, ns8250->fcr); uart_setreg(bas, REG_IER, ns8250->ier); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } static kobj_method_t tegra_methods[] = { KOBJMETHOD(uart_probe, ns8250_bus_probe), KOBJMETHOD(uart_attach, tegra_uart_attach), KOBJMETHOD(uart_detach, ns8250_bus_detach), KOBJMETHOD(uart_flush, ns8250_bus_flush), KOBJMETHOD(uart_getsig, ns8250_bus_getsig), KOBJMETHOD(uart_ioctl, ns8250_bus_ioctl), KOBJMETHOD(uart_ipend, ns8250_bus_ipend), KOBJMETHOD(uart_param, ns8250_bus_param), KOBJMETHOD(uart_receive, ns8250_bus_receive), KOBJMETHOD(uart_setsig, ns8250_bus_setsig), KOBJMETHOD(uart_transmit, ns8250_bus_transmit), KOBJMETHOD(uart_grab, tegra_uart_grab), KOBJMETHOD(uart_ungrab, tegra_uart_ungrab), KOBJMETHOD_END }; static struct 
uart_class tegra_uart_class = { "tegra class", tegra_methods, sizeof(struct tegra_softc), .uc_ops = &uart_ns8250_ops, .uc_range = 8, .uc_rclk = 0, }; /* Compatible devices. */ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-uart", (uintptr_t)&tegra_uart_class}, {"nvidia,tegra210-uart", (uintptr_t)&tegra_uart_class}, {NULL, (uintptr_t)NULL}, }; UART_FDT_CLASS(compat_data); /* * UART Driver interface. */ static int uart_fdt_get_shift1(phandle_t node) { pcell_t shift; if ((OF_getencprop(node, "reg-shift", &shift, sizeof(shift))) <= 0) shift = 2; return ((int)shift); } static int tegra_uart_probe(device_t dev) { struct tegra_softc *sc; phandle_t node; uint64_t freq; int shift; int rv; const struct ofw_compat_data *cd; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); cd = ofw_bus_search_compatible(dev, compat_data); if (cd->ocd_data == 0) return (ENXIO); sc->ns8250_base.base.sc_class = (struct uart_class *)cd->ocd_data; rv = hwreset_get_by_ofw_name(dev, 0, "serial", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get 'serial' reset\n"); return (ENXIO); } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot unreset 'serial' reset\n"); return (ENXIO); } node = ofw_bus_get_node(dev); shift = uart_fdt_get_shift1(node); rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get UART clock: %d\n", rv); return (ENXIO); } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable UART clock: %d\n", rv); return (ENXIO); } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot enable UART clock: %d\n", rv); return (ENXIO); } return (uart_bus_probe(dev, shift, 0, (int)freq, 0, 0, 0)); } static int tegra_uart_detach(device_t dev) { struct tegra_softc *sc; sc = device_get_softc(dev); if (sc->clk != NULL) { clk_release(sc->clk); } return (uart_bus_detach(dev)); } static device_method_t tegra_uart_bus_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, tegra_uart_probe), DEVMETHOD(device_attach, uart_bus_attach), DEVMETHOD(device_detach, tegra_uart_detach), { 0, 0 } }; static driver_t tegra_uart_driver = { uart_driver_name, tegra_uart_bus_methods, sizeof(struct tegra_softc), }; DRIVER_MODULE(tegra_uart, simplebus, tegra_uart_driver, 0, 0); diff --git a/sys/arm/nvidia/tegra_usbphy.c b/sys/arm/nvidia/tegra_usbphy.c index 5c48afd48d2f..438ba719170c 100644 --- a/sys/arm/nvidia/tegra_usbphy.c +++ b/sys/arm/nvidia/tegra_usbphy.c @@ -1,848 +1,848 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * USB phy driver for Tegra SoCs. 
*/ #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include "phynode_if.h" #define CTRL_ICUSB_CTRL 0x15c #define ICUSB_CTR_IC_ENB1 (1 << 3) #define CTRL_USB_USBMODE 0x1f8 #define USB_USBMODE_MASK (3 << 0) #define USB_USBMODE_HOST (3 << 0) #define USB_USBMODE_DEVICE (2 << 0) #define CTRL_USB_HOSTPC1_DEVLC 0x1b4 #define USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29) #define USB_HOSTPC1_DEVLC_STS (1 << 28) #define USB_HOSTPC1_DEVLC_PHCD (1 << 22) #define IF_USB_SUSP_CTRL 0x400 #define FAST_WAKEUP_RESP (1 << 26) #define UTMIP_SUSPL1_SET (1 << 25) #define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16) #define USB_SUSP_SET (1 << 14) #define UTMIP_PHY_ENB (1 << 12) #define UTMIP_RESET (1 << 11) #define USB_SUSP_POL (1 << 10) #define USB_PHY_CLK_VALID_INT_ENB (1 << 9) #define USB_PHY_CLK_VALID_INT_STS (1 << 8) #define USB_PHY_CLK_VALID (1 << 7) #define USB_CLKEN (1 << 6) #define USB_SUSP_CLR (1 << 5) #define USB_WAKE_ON_DISCON_EN_DEV (1 << 4) #define USB_WAKE_ON_CNNT_EN_DEV (1 << 3) #define USB_WAKE_ON_RESUME_EN (1 << 2) #define USB_WAKEUP_INT_ENB (1 << 1) #define USB_WAKEUP_INT_STS (1 << 0) #define IF_USB_PHY_VBUS_SENSORS 0x404 #define B_SESS_END_SW_VALUE (1 << 4) #define B_SESS_END_SW_EN (1 << 3) #define UTMIP_XCVR_CFG0 0x808 #define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25) #define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22) #define UTMIP_XCVR_LSBIAS_SEL (1 << 21) #define UTMIP_XCVR_DISCON_METHOD (1 << 20) #define UTMIP_FORCE_PDZI_POWERUP (1 << 19) #define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18) #define UTMIP_FORCE_PD2_POWERUP (1 << 17) #define UTMIP_FORCE_PD2_POWERDOWN (1 << 16) #define UTMIP_FORCE_PD_POWERUP (1 << 15) #define UTMIP_FORCE_PD_POWERDOWN (1 << 14) #define UTMIP_XCVR_TERMEN (1 << 13) #define UTMIP_XCVR_HSLOOPBACK (1 << 12) #define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10) #define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8) #define 
UTMIP_XCVR_FSSLEW(x) (((x) & 0x3) << 6) #define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4) #define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0) #define UTMIP_BIAS_CFG0 0x80C #define UTMIP_IDDIG_C_VAL (1 << 30) #define UTMIP_IDDIG_C_SEL (1 << 29) #define UTMIP_IDDIG_B_VAL (1 << 28) #define UTMIP_IDDIG_B_SEL (1 << 27) #define UTMIP_IDDIG_A_VAL (1 << 26) #define UTMIP_IDDIG_A_SEL (1 << 25) #define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24) #define UTMIP_IDPD_VAL (1 << 23) #define UTMIP_IDPD_SEL (1 << 22) #define UTMIP_IDDIG_VAL (1 << 21) #define UTMIP_IDDIG_SEL (1 << 20) #define UTMIP_GPI_VAL (1 << 19) #define UTMIP_GPI_SEL (1 << 18) #define UTMIP_ACTIVE_TERM_OFFSET(x) (((x) & 0x7) << 15) #define UTMIP_ACTIVE_PULLUP_OFFSET(x) (((x) & 0x7) << 12) #define UTMIP_OTGPD (1 << 11) #define UTMIP_BIASPD (1 << 10) #define UTMIP_VBUS_LEVEL_LEVEL(x) (((x) & 0x3) << 8) #define UTMIP_SESS_LEVEL_LEVEL(x) (((x) & 0x3) << 6) #define UTMIP_HSCHIRP_LEVEL(x) (((x) & 0x3) << 4) #define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2) #define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0) #define UTMIP_HSRX_CFG0 0x810 #define UTMIP_KEEP_PATT_ON_ACTIVE(x) (((x) & 0x3) << 30) #define UTMIP_ALLOW_CONSEC_UPDN (1 << 29) #define UTMIP_REALIGN_ON_NEW_PKT (1 << 28) #define UTMIP_PCOUNT_UPDN_DIV(x) (((x) & 0xf) << 24) #define UTMIP_SQUELCH_EOP_DLY(x) (((x) & 0x7) << 21) #define UTMIP_NO_STRIPPING (1 << 20) #define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15) #define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10) #define UTMIP_ELASTIC_OVERRUN_DISABLE (1 << 9) #define UTMIP_ELASTIC_UNDERRUN_DISABLE (1 << 8) #define UTMIP_PASS_CHIRP (1 << 7) #define UTMIP_PASS_FEEDBACK (1 << 6) #define UTMIP_PCOUNT_INERTIA(x) (((x) & 0x3) << 4) #define UTMIP_PHASE_ADJUST(x) (((x) & 0x3) << 2) #define UTMIP_THREE_SYNCBITS (1 << 1) #define UTMIP_USE4SYNC_TRAN (1 << 0) #define UTMIP_HSRX_CFG1 0x814 #define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1F) << 1) #define UTMIP_HS_ALLOW_KEEP_ALIVE (1 << 0) #define UTMIP_TX_CFG0 0x820 #define 
UTMIP_FS_PREAMBLE_J (1 << 19) #define UTMIP_FS_POSTAMBLE_OUTPUT_ENABLE (1 << 18) #define UTMIP_FS_PREAMBLE_OUTPUT_ENABLE (1 << 17) #define UTMIP_FSLS_ALLOW_SOP_TX_STUFF_ERR (1 << 16) #define UTMIP_HS_READY_WAIT_FOR_VALID (1 << 15) #define UTMIP_HS_TX_IPG_DLY(x) (((x) & 0x1f) << 10) #define UTMIP_HS_DISCON_EOP_ONLY (1 << 9) #define UTMIP_HS_DISCON_DISABLE (1 << 8) #define UTMIP_HS_POSTAMBLE_OUTPUT_ENABLE (1 << 7) #define UTMIP_HS_PREAMBLE_OUTPUT_ENABLE (1 << 6) #define UTMIP_SIE_RESUME_ON_LINESTATE (1 << 5) #define UTMIP_SOF_ON_NO_STUFF (1 << 4) #define UTMIP_SOF_ON_NO_ENCODE (1 << 3) #define UTMIP_NO_STUFFING (1 << 2) #define UTMIP_NO_ENCODING (1 << 1) #define UTMIP_NO_SYNC_NO_EOP (1 << 0) #define UTMIP_MISC_CFG0 0x824 #define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27) #define UTMIP_DPDM_OBSERVE (1 << 26) #define UTMIP_KEEP_XCVR_PD_ON_SOFT_DISCON (1 << 25) #define UTMIP_ALLOW_LS_ON_SOFT_DISCON (1 << 24) #define UTMIP_FORCE_FS_DISABLE_ON_DEV_CHIRP (1 << 23) #define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22) #define UTMIP_LS_TO_FS_SKIP_4MS (1 << 21) #define UTMIP_INJECT_ERROR_TYPE(x) (((x) & 0x3) << 19) #define UTMIP_FORCE_HS_CLOCK_ON (1 << 18) #define UTMIP_DISABLE_HS_TERM (1 << 17) #define UTMIP_FORCE_HS_TERM (1 << 16) #define UTMIP_DISABLE_PULLUP_DP (1 << 15) #define UTMIP_DISABLE_PULLUP_DM (1 << 14) #define UTMIP_DISABLE_PULLDN_DP (1 << 13) #define UTMIP_DISABLE_PULLDN_DM (1 << 12) #define UTMIP_FORCE_PULLUP_DP (1 << 11) #define UTMIP_FORCE_PULLUP_DM (1 << 10) #define UTMIP_FORCE_PULLDN_DP (1 << 9) #define UTMIP_FORCE_PULLDN_DM (1 << 8) #define UTMIP_STABLE_COUNT(x) (((x) & 0x7) << 5) #define UTMIP_STABLE_ALL (1 << 4) #define UTMIP_NO_FREE_ON_SUSPEND (1 << 3) #define UTMIP_NEVER_FREE_RUNNING_TERMS (1 << 2) #define UTMIP_ALWAYS_FREE_RUNNING_TERMS (1 << 1) #define UTMIP_COMB_TERMS (1 << 0) #define UTMIP_MISC_CFG1 0x828 #define UTMIP_PHY_XTAL_CLOCKEN (1 << 30) #define UTMIP_DEBOUNCE_CFG0 0x82C #define UTMIP_BIAS_DEBOUNCE_B(x) (((x) & 0xffff) << 16) #define 
UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0) #define UTMIP_BAT_CHRG_CFG0 0x830 #define UTMIP_CHRG_DEBOUNCE_TIMESCALE(x) (((x) & 0x1f) << 8) #define UTMIP_OP_I_SRC_ENG (1 << 5) #define UTMIP_ON_SRC_ENG (1 << 4) #define UTMIP_OP_SRC_ENG (1 << 3) #define UTMIP_ON_SINK_ENG (1 << 2) #define UTMIP_OP_SINK_ENG (1 << 1) #define UTMIP_PD_CHRG (1 << 0) #define UTMIP_SPARE_CFG0 0x834 #define FUSE_HS_IREF_CAP_CFG (1 << 7) #define FUSE_HS_SQUELCH_LEVEL (1 << 6) #define FUSE_SPARE (1 << 5) #define FUSE_TERM_RANGE_ADJ_SEL (1 << 4) #define FUSE_SETUP_SEL (1 << 3) #define HS_RX_LATE_SQUELCH (1 << 2) #define HS_RX_FLUSH_ALAP (1 << 1) #define HS_RX_IPG_ERROR_ENABLE (1 << 0) #define UTMIP_XCVR_CFG1 0x838 #define UTMIP_XCVR_RPU_RANGE_ADJ(x) (((x) & 0x3) << 26) #define UTMIP_XCVR_HS_IREF_CAP(x) (((x) & 0x3) << 24) #define UTMIP_XCVR_SPARE(x) (((x) & 0x3) << 22) #define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18) #define UTMIP_RCTRL_SW_SET (1 << 17) #define UTMIP_RCTRL_SW_VAL(x) (((x) & 0x1f) << 12) #define UTMIP_TCTRL_SW_SET (1 << 11) #define UTMIP_TCTRL_SW_VAL(x) (((x) & 0x1f) << 6) #define UTMIP_FORCE_PDDR_POWERUP (1 << 5) #define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4) #define UTMIP_FORCE_PDCHRP_POWERUP (1 << 3) #define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2) #define UTMIP_FORCE_PDDISC_POWERUP (1 << 1) #define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0) #define UTMIP_BIAS_CFG1 0x83c #define UTMIP_BIAS_DEBOUNCE_TIMESCALE(x) (((x) & 0x3f) << 8) #define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3) #define UTMIP_VBUS_WAKEUP_POWERDOWN (1 << 2) #define UTMIP_FORCE_PDTRK_POWERUP (1 << 1) #define UTMIP_FORCE_PDTRK_POWERDOWN (1 << 0) static int usbpby_enable_cnt; enum usb_ifc_type { USB_IFC_TYPE_UNKNOWN = 0, USB_IFC_TYPE_UTMI, USB_IFC_TYPE_ULPI }; enum usb_dr_mode { USB_DR_MODE_UNKNOWN = 0, USB_DR_MODE_DEVICE, USB_DR_MODE_HOST, USB_DR_MODE_OTG }; struct usbphy_softc { device_t dev; struct resource *mem_res; struct resource *pads_res; clk_t clk_reg; clk_t clk_pads; clk_t clk_pllu; regulator_t 
supply_vbus; hwreset_t reset_usb; hwreset_t reset_pads; enum usb_ifc_type ifc_type; enum usb_dr_mode dr_mode; bool have_utmi_regs; /* UTMI params */ int hssync_start_delay; int elastic_limit; int idle_wait_delay; int term_range_adj; int xcvr_lsfslew; int xcvr_lsrslew; int xcvr_hsslew; int hssquelch_level; int hsdiscon_level; int xcvr_setup; int xcvr_setup_use_fuses; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra210-usb-phy", 1}, {"nvidia,tegra30-usb-phy", 1}, {NULL, 0}, }; /* Phy controller class and methods. */ static int usbphy_phy_enable(struct phynode *phy, bool enable); static phynode_method_t usbphy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, usbphy_phy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(usbphy_phynode, usbphy_phynode_class, usbphy_phynode_methods, 0, phynode_class); #define RD4(sc, offs) \ bus_read_4(sc->mem_res, offs) #define WR4(sc, offs, val) \ bus_write_4(sc->mem_res, offs, val) static int reg_wait(struct usbphy_softc *sc, uint32_t reg, uint32_t mask, uint32_t val) { int i; for (i = 0; i < 1000; i++) { if ((RD4(sc, reg) & mask) == val) return (0); DELAY(10); } return (ETIMEDOUT); } static int usbphy_utmi_phy_clk(struct usbphy_softc *sc, bool enable) { uint32_t val; int rv; val = RD4(sc, CTRL_USB_HOSTPC1_DEVLC); if (enable) val &= ~USB_HOSTPC1_DEVLC_PHCD; else val |= USB_HOSTPC1_DEVLC_PHCD; WR4(sc, CTRL_USB_HOSTPC1_DEVLC, val); rv = reg_wait(sc, IF_USB_SUSP_CTRL, USB_PHY_CLK_VALID, enable ? 
USB_PHY_CLK_VALID: 0); if (rv != 0) { device_printf(sc->dev, "USB phy clock timeout.\n"); return (ETIMEDOUT); } return (0); } static int usbphy_utmi_enable(struct usbphy_softc *sc) { int rv; uint32_t val; /* Reset phy */ val = RD4(sc, IF_USB_SUSP_CTRL); val |= UTMIP_RESET; WR4(sc, IF_USB_SUSP_CTRL, val); val = RD4(sc, UTMIP_TX_CFG0); val |= UTMIP_FS_PREAMBLE_J; WR4(sc, UTMIP_TX_CFG0, val); val = RD4(sc, UTMIP_HSRX_CFG0); val &= ~UTMIP_IDLE_WAIT(~0); val &= ~UTMIP_ELASTIC_LIMIT(~0); val |= UTMIP_IDLE_WAIT(sc->idle_wait_delay); val |= UTMIP_ELASTIC_LIMIT(sc->elastic_limit); WR4(sc, UTMIP_HSRX_CFG0, val); val = RD4(sc, UTMIP_HSRX_CFG1); val &= ~UTMIP_HS_SYNC_START_DLY(~0); val |= UTMIP_HS_SYNC_START_DLY(sc->hssync_start_delay); WR4(sc, UTMIP_HSRX_CFG1, val); val = RD4(sc, UTMIP_DEBOUNCE_CFG0); val &= ~UTMIP_BIAS_DEBOUNCE_A(~0); val |= UTMIP_BIAS_DEBOUNCE_A(0x7530); /* For 12MHz */ WR4(sc, UTMIP_DEBOUNCE_CFG0, val); val = RD4(sc, UTMIP_MISC_CFG0); val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE; WR4(sc, UTMIP_MISC_CFG0, val); if (sc->dr_mode == USB_DR_MODE_DEVICE) { val = RD4(sc,IF_USB_SUSP_CTRL); val &= ~USB_WAKE_ON_CNNT_EN_DEV; val &= ~USB_WAKE_ON_DISCON_EN_DEV; WR4(sc, IF_USB_SUSP_CTRL, val); val = RD4(sc, UTMIP_BAT_CHRG_CFG0); val &= ~UTMIP_PD_CHRG; WR4(sc, UTMIP_BAT_CHRG_CFG0, val); } else { val = RD4(sc, UTMIP_BAT_CHRG_CFG0); val |= UTMIP_PD_CHRG; WR4(sc, UTMIP_BAT_CHRG_CFG0, val); } usbpby_enable_cnt++; if (usbpby_enable_cnt == 1) { rv = hwreset_deassert(sc->reset_pads); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'utmi-pads' reset\n"); return (rv); } rv = clk_enable(sc->clk_pads); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'utmi-pads' clock\n"); return (rv); } val = bus_read_4(sc->pads_res, UTMIP_BIAS_CFG0); val &= ~UTMIP_OTGPD; val &= ~UTMIP_BIASPD; val &= ~UTMIP_HSSQUELCH_LEVEL(~0); val &= ~UTMIP_HSDISCON_LEVEL(~0); val &= ~UTMIP_HSDISCON_LEVEL_MSB(~0); val |= UTMIP_HSSQUELCH_LEVEL(sc->hssquelch_level); val |= UTMIP_HSDISCON_LEVEL(sc->hsdiscon_level); 
val |= UTMIP_HSDISCON_LEVEL_MSB(sc->hsdiscon_level); bus_write_4(sc->pads_res, UTMIP_BIAS_CFG0, val); rv = clk_disable(sc->clk_pads); if (rv != 0) { device_printf(sc->dev, "Cannot disable 'utmi-pads' clock\n"); return (rv); } } val = RD4(sc, UTMIP_XCVR_CFG0); val &= ~UTMIP_FORCE_PD_POWERDOWN; val &= ~UTMIP_FORCE_PD2_POWERDOWN ; val &= ~UTMIP_FORCE_PDZI_POWERDOWN; val &= ~UTMIP_XCVR_LSBIAS_SEL; val &= ~UTMIP_XCVR_LSFSLEW(~0); val &= ~UTMIP_XCVR_LSRSLEW(~0); val &= ~UTMIP_XCVR_HSSLEW(~0); val &= ~UTMIP_XCVR_HSSLEW_MSB(~0); val |= UTMIP_XCVR_LSFSLEW(sc->xcvr_lsfslew); val |= UTMIP_XCVR_LSRSLEW(sc->xcvr_lsrslew); val |= UTMIP_XCVR_HSSLEW(sc->xcvr_hsslew); val |= UTMIP_XCVR_HSSLEW_MSB(sc->xcvr_hsslew); if (!sc->xcvr_setup_use_fuses) { val &= ~UTMIP_XCVR_SETUP(~0); val &= ~UTMIP_XCVR_SETUP_MSB(~0); val |= UTMIP_XCVR_SETUP(sc->xcvr_setup); val |= UTMIP_XCVR_SETUP_MSB(sc->xcvr_setup); } WR4(sc, UTMIP_XCVR_CFG0, val); val = RD4(sc, UTMIP_XCVR_CFG1); val &= ~UTMIP_FORCE_PDDISC_POWERDOWN; val &= ~UTMIP_FORCE_PDCHRP_POWERDOWN; val &= ~UTMIP_FORCE_PDDR_POWERDOWN; val &= ~UTMIP_XCVR_TERM_RANGE_ADJ(~0); val |= UTMIP_XCVR_TERM_RANGE_ADJ(sc->term_range_adj); WR4(sc, UTMIP_XCVR_CFG1, val); val = RD4(sc, UTMIP_BIAS_CFG1); val &= ~UTMIP_BIAS_PDTRK_COUNT(~0); val |= UTMIP_BIAS_PDTRK_COUNT(0x5); WR4(sc, UTMIP_BIAS_CFG1, val); val = RD4(sc, UTMIP_SPARE_CFG0); if (sc->xcvr_setup_use_fuses) val |= FUSE_SETUP_SEL; else val &= ~FUSE_SETUP_SEL; WR4(sc, UTMIP_SPARE_CFG0, val); val = RD4(sc, IF_USB_SUSP_CTRL); val |= UTMIP_PHY_ENB; WR4(sc, IF_USB_SUSP_CTRL, val); val = RD4(sc, IF_USB_SUSP_CTRL); val &= ~UTMIP_RESET; WR4(sc, IF_USB_SUSP_CTRL, val); usbphy_utmi_phy_clk(sc, true); val = RD4(sc, CTRL_USB_USBMODE); val &= ~USB_USBMODE_MASK; if (sc->dr_mode == USB_DR_MODE_HOST) val |= USB_USBMODE_HOST; else val |= USB_USBMODE_DEVICE; WR4(sc, CTRL_USB_USBMODE, val); val = RD4(sc, CTRL_USB_HOSTPC1_DEVLC); val &= ~USB_HOSTPC1_DEVLC_PTS(~0); val |= USB_HOSTPC1_DEVLC_PTS(0); WR4(sc, 
CTRL_USB_HOSTPC1_DEVLC, val);

	return (0);
}

/*
 * Power down the UTMI phy: stop its clock, put it into reset, power down
 * the charger and transceiver circuits, and — when the last phy user goes
 * away — power down the shared UTMI pads.  Returns 0 or an errno.
 */
static int
usbphy_utmi_disable(struct usbphy_softc *sc)
{
	int rv;
	uint32_t val;

	/* Gate the UTMI phy clock before touching the phy registers. */
	usbphy_utmi_phy_clk(sc, false);

	if (sc->dr_mode == USB_DR_MODE_DEVICE) {
		/* In device mode, arm wake-on-connect with a debounce of 5. */
		val = RD4(sc, IF_USB_SUSP_CTRL);
		val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
		val |= USB_WAKE_ON_CNNT_EN_DEV;
		val |= USB_WAKEUP_DEBOUNCE_COUNT(5);
		WR4(sc, IF_USB_SUSP_CTRL, val);
	}

	/* Hold the phy in reset. */
	val = RD4(sc, IF_USB_SUSP_CTRL);
	val |= UTMIP_RESET;
	WR4(sc, IF_USB_SUSP_CTRL, val);

	/* Power down the battery-charger detection circuit. */
	val = RD4(sc, UTMIP_BAT_CHRG_CFG0);
	val |= UTMIP_PD_CHRG;
	WR4(sc, UTMIP_BAT_CHRG_CFG0, val);

	/* Force all transceiver power-down controls on. */
	val = RD4(sc, UTMIP_XCVR_CFG0);
	val |= UTMIP_FORCE_PD_POWERDOWN;
	val |= UTMIP_FORCE_PD2_POWERDOWN;
	val |= UTMIP_FORCE_PDZI_POWERDOWN;
	WR4(sc, UTMIP_XCVR_CFG0, val);

	val = RD4(sc, UTMIP_XCVR_CFG1);
	val |= UTMIP_FORCE_PDDISC_POWERDOWN;
	val |= UTMIP_FORCE_PDCHRP_POWERDOWN;
	val |= UTMIP_FORCE_PDDR_POWERDOWN;
	WR4(sc, UTMIP_XCVR_CFG1, val);

	/*
	 * The UTMI pads are shared by all phy instances; power them down
	 * only when the last enabled phy is disabled (refcounted through
	 * the file-scope usbpby_enable_cnt).
	 */
	usbpby_enable_cnt--;
	if (usbpby_enable_cnt <= 0) {
		/* Pads can only be accessed with their clock running. */
		rv = clk_enable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot enable 'utmi-pads' clock\n");
			return (rv);
		}
		val = bus_read_4(sc->pads_res, UTMIP_BIAS_CFG0);
		val |= UTMIP_OTGPD;
		val |= UTMIP_BIASPD;
		bus_write_4(sc->pads_res, UTMIP_BIAS_CFG0, val);
		rv = clk_disable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot disable 'utmi-pads' clock\n");
			return (rv);
		}
	}
	return (0);
}

/*
 * phynode enable/disable callback.  Only the UTMI interface type is
 * implemented; everything else is rejected with ENXIO.
 */
static int
usbphy_phy_enable(struct phynode *phy, bool enable)
{
	device_t dev;
	struct usbphy_softc *sc;
	int rv = 0;

	dev = phynode_get_device(phy);
	sc = device_get_softc(dev);

	if (sc->ifc_type != USB_IFC_TYPE_UTMI) {
		device_printf(sc->dev,
		    "Only UTMI interface is supported.\n");
		return (ENXIO);
	}
	if (enable)
		rv = usbphy_utmi_enable(sc);
	else
		rv = usbphy_utmi_disable(sc);

	return (rv);
}

/*
 * Parse the named FDT string property ("utmi"/"ulpi") into an interface
 * type; returns USB_IFC_TYPE_UNKNOWN if the property is missing or the
 * value is unrecognized.
 */
static enum usb_ifc_type
usb_get_ifc_mode(device_t dev, phandle_t node, char *name)
{
	char *tmpstr;
	int rv;
	enum usb_ifc_type ret;

	rv = OF_getprop_alloc(node, name, (void **)&tmpstr);
	if (rv <= 0)
		return (USB_IFC_TYPE_UNKNOWN);

	ret = USB_IFC_TYPE_UNKNOWN;
	if
(strcmp(tmpstr, "utmi") == 0) ret = USB_IFC_TYPE_UTMI; else if (strcmp(tmpstr, "ulpi") == 0) ret = USB_IFC_TYPE_ULPI; else device_printf(dev, "Unsupported phy type: %s\n", tmpstr); OF_prop_free(tmpstr); return (ret); } static enum usb_dr_mode usb_get_dr_mode(device_t dev, phandle_t node, char *name) { char *tmpstr; int rv; enum usb_dr_mode ret; rv = OF_getprop_alloc(node, name, (void **)&tmpstr); if (rv <= 0) return (USB_DR_MODE_UNKNOWN); ret = USB_DR_MODE_UNKNOWN; if (strcmp(tmpstr, "device") == 0) ret = USB_DR_MODE_DEVICE; else if (strcmp(tmpstr, "host") == 0) ret = USB_DR_MODE_HOST; else if (strcmp(tmpstr, "otg") == 0) ret = USB_DR_MODE_OTG; else device_printf(dev, "Unknown dr mode: %s\n", tmpstr); OF_prop_free(tmpstr); return (ret); } static int usbphy_utmi_read_params(struct usbphy_softc *sc, phandle_t node) { int rv; rv = OF_getencprop(node, "nvidia,hssync-start-delay", &sc->hssync_start_delay, sizeof (sc->hssync_start_delay)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,elastic-limit", &sc->elastic_limit, sizeof (sc->elastic_limit)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,idle-wait-delay", &sc->idle_wait_delay, sizeof (sc->idle_wait_delay)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,term-range-adj", &sc->term_range_adj, sizeof (sc->term_range_adj)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,xcvr-lsfslew", &sc->xcvr_lsfslew, sizeof (sc->xcvr_lsfslew)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,xcvr-lsrslew", &sc->xcvr_lsrslew, sizeof (sc->xcvr_lsrslew)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,xcvr-hsslew", &sc->xcvr_hsslew, sizeof (sc->xcvr_hsslew)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,hssquelch-level", &sc->hssquelch_level, sizeof (sc->hssquelch_level)); if (rv <= 0) return (ENXIO); rv = OF_getencprop(node, "nvidia,hsdiscon-level", &sc->hsdiscon_level, sizeof (sc->hsdiscon_level)); if (rv <= 0) return (ENXIO); 
rv = OF_getproplen(node, "nvidia,xcvr-setup-use-fuses"); if (rv >= 1) { sc->xcvr_setup_use_fuses = 1; } else { rv = OF_getencprop(node, "nvidia,xcvr-setup", &sc->xcvr_setup, sizeof (sc->xcvr_setup)); if (rv <= 0) return (ENXIO); } return (0); } static int usbphy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Tegra USB phy"); return (BUS_PROBE_DEFAULT); } static int usbphy_attach(device_t dev) { struct usbphy_softc *sc; int rid, rv; phandle_t node; struct phynode *phynode; struct phynode_init_def phy_init; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); return (ENXIO); } rid = 1; sc->pads_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); return (ENXIO); } node = ofw_bus_get_node(dev); rv = hwreset_get_by_ofw_name(sc->dev, 0, "usb", &sc->reset_usb); if (rv != 0) { device_printf(dev, "Cannot get 'usb' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "utmi-pads", &sc->reset_pads); if (rv != 0) { device_printf(dev, "Cannot get 'utmi-pads' reset\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "reg", &sc->clk_reg); if (rv != 0) { device_printf(sc->dev, "Cannot get 'reg' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "pll_u", &sc->clk_pllu); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll_u' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "utmi-pads", &sc->clk_pads); if (rv != 0) { device_printf(sc->dev, "Cannot get 'utmi-pads' clock\n"); return (ENXIO); } rv = hwreset_deassert(sc->reset_usb); if (rv != 0) { device_printf(dev, "Cannot unreset 'usb' reset\n"); return (ENXIO); } rv = 
clk_enable(sc->clk_pllu); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pllu' clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_reg); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'reg' clock\n"); return (ENXIO); } if (OF_hasprop(node, "nvidia,has-utmi-pad-registers")) sc->have_utmi_regs = true; sc->dr_mode = usb_get_dr_mode(dev, node, "dr_mode"); if (sc->dr_mode == USB_DR_MODE_UNKNOWN) sc->dr_mode = USB_DR_MODE_HOST; sc->ifc_type = usb_get_ifc_mode(dev, node, "phy_type"); /* We supports only utmi phy mode for now .... */ if (sc->ifc_type != USB_IFC_TYPE_UTMI) { device_printf(dev, "Unsupported phy type\n"); return (ENXIO); } rv = usbphy_utmi_read_params(sc, node); if (rv < 0) return rv; if (OF_hasprop(node, "vbus-supply")) { rv = regulator_get_by_ofw_property(sc->dev, 0, "vbus-supply", &sc->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot get \"vbus\" regulator\n"); return (ENXIO); } rv = regulator_enable(sc->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot enable \"vbus\" regulator\n"); return (rv); } } /* Create and register phy. */ bzero(&phy_init, sizeof(phy_init)); phy_init.id = 1; phy_init.ofw_node = node; phynode = phynode_create(dev, &usbphy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(sc->dev, "Cannot create phy\n"); return (ENXIO); } if (phynode_register(phynode) == NULL) { device_printf(sc->dev, "Cannot create phy\n"); return (ENXIO); } return (0); } static int usbphy_detach(device_t dev) { /* This device is always present. 
*/ return (EBUSY); } static device_method_t tegra_usbphy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, usbphy_probe), DEVMETHOD(device_attach, usbphy_attach), DEVMETHOD(device_detach, usbphy_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(usbphy, tegra_usbphy_driver, tegra_usbphy_methods, sizeof(struct usbphy_softc)); EARLY_DRIVER_MODULE(tegra_usbphy, simplebus, tegra_usbphy_driver, NULL, NULL, 79); diff --git a/sys/arm/nvidia/tegra_xhci.c b/sys/arm/nvidia/tegra_xhci.c index 2c2c69d177ea..e80e34f38c38 100644 --- a/sys/arm/nvidia/tegra_xhci.c +++ b/sys/arm/nvidia/tegra_xhci.c @@ -1,1122 +1,1122 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * XHCI driver for Tegra SoCs. 
*/ #include "opt_bus.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" /* FPCI address space */ #define T_XUSB_CFG_0 0x000 #define T_XUSB_CFG_1 0x004 #define CFG_1_BUS_MASTER (1 << 2) #define CFG_1_MEMORY_SPACE (1 << 1) #define CFG_1_IO_SPACE (1 << 0) #define T_XUSB_CFG_2 0x008 #define T_XUSB_CFG_3 0x00C #define T_XUSB_CFG_4 0x010 #define CFG_4_BASE_ADDRESS(x) (((x) & 0x1FFFF) << 15) #define T_XUSB_CFG_5 0x014 #define T_XUSB_CFG_ARU_MAILBOX_CMD 0x0E4 #define ARU_MAILBOX_CMD_INT_EN (1U << 31) #define ARU_MAILBOX_CMD_DEST_XHCI (1 << 30) #define ARU_MAILBOX_CMD_DEST_SMI (1 << 29) #define ARU_MAILBOX_CMD_DEST_PME (1 << 28) #define ARU_MAILBOX_CMD_DEST_FALC (1 << 27) #define T_XUSB_CFG_ARU_MAILBOX_DATA_IN 0x0E8 #define ARU_MAILBOX_DATA_IN_DATA(x) (((x) & 0xFFFFFF) << 0) #define ARU_MAILBOX_DATA_IN_TYPE(x) (((x) & 0x0000FF) << 24) #define T_XUSB_CFG_ARU_MAILBOX_DATA_OUT 0x0EC #define ARU_MAILBOX_DATA_OUT_DATA(x) (((x) >> 0) & 0xFFFFFF) #define ARU_MAILBOX_DATA_OUT_TYPE(x) (((x) >> 24) & 0x0000FF) #define T_XUSB_CFG_ARU_MAILBOX_OWNER 0x0F0 #define ARU_MAILBOX_OWNER_SW 2 #define ARU_MAILBOX_OWNER_FW 1 #define ARU_MAILBOX_OWNER_NONE 0 #define XUSB_CFG_ARU_C11_CSBRANGE 0x41C /* ! UNDOCUMENTED ! */ #define ARU_C11_CSBRANGE_PAGE(x) ((x) >> 9) #define ARU_C11_CSBRANGE_ADDR(x) (0x800 + ((x) & 0x1FF)) #define XUSB_CFG_ARU_SMI_INTR 0x428 /* ! UNDOCUMENTED ! */ #define ARU_SMI_INTR_EN (1 << 3) #define ARU_SMI_INTR_FW_HANG (1 << 1) #define XUSB_CFG_ARU_RST 0x42C /* ! UNDOCUMENTED ! 
*/ #define ARU_RST_RESET (1 << 0) #define XUSB_HOST_CONFIGURATION 0x180 #define CONFIGURATION_CLKEN_OVERRIDE (1U<< 31) #define CONFIGURATION_PW_NO_DEVSEL_ERR_CYA (1 << 19) #define CONFIGURATION_INITIATOR_READ_IDLE (1 << 18) #define CONFIGURATION_INITIATOR_WRITE_IDLE (1 << 17) #define CONFIGURATION_WDATA_LEAD_CYA (1 << 15) #define CONFIGURATION_WR_INTRLV_CYA (1 << 14) #define CONFIGURATION_TARGET_READ_IDLE (1 << 11) #define CONFIGURATION_TARGET_WRITE_IDLE (1 << 10) #define CONFIGURATION_MSI_VEC_EMPTY (1 << 9) #define CONFIGURATION_UFPCI_MSIAW (1 << 7) #define CONFIGURATION_UFPCI_PWPASSPW (1 << 6) #define CONFIGURATION_UFPCI_PASSPW (1 << 5) #define CONFIGURATION_UFPCI_PWPASSNPW (1 << 4) #define CONFIGURATION_DFPCI_PWPASSNPW (1 << 3) #define CONFIGURATION_DFPCI_RSPPASSPW (1 << 2) #define CONFIGURATION_DFPCI_PASSPW (1 << 1) #define CONFIGURATION_EN_FPCI (1 << 0) /* IPFS address space */ #define XUSB_HOST_FPCI_ERROR_MASKS 0x184 #define FPCI_ERROR_MASTER_ABORT (1 << 2) #define FPCI_ERRORI_DATA_ERROR (1 << 1) #define FPCI_ERROR_TARGET_ABORT (1 << 0) #define XUSB_HOST_INTR_MASK 0x188 #define INTR_IP_INT_MASK (1 << 16) #define INTR_MSI_MASK (1 << 8) #define INTR_INT_MASK (1 << 0) #define XUSB_HOST_CLKGATE_HYSTERESIS 0x1BC /* CSB Falcon CPU */ #define XUSB_FALCON_CPUCTL 0x100 #define CPUCTL_STOPPED (1 << 5) #define CPUCTL_HALTED (1 << 4) #define CPUCTL_HRESET (1 << 3) #define CPUCTL_SRESET (1 << 2) #define CPUCTL_STARTCPU (1 << 1) #define CPUCTL_IINVAL (1 << 0) #define XUSB_FALCON_BOOTVEC 0x104 #define XUSB_FALCON_DMACTL 0x10C #define XUSB_FALCON_IMFILLRNG1 0x154 #define IMFILLRNG1_TAG_HI(x) (((x) & 0xFFF) << 16) #define IMFILLRNG1_TAG_LO(x) (((x) & 0xFFF) << 0) #define XUSB_FALCON_IMFILLCTL 0x158 /* CSB mempool */ #define XUSB_CSB_MEMPOOL_APMAP 0x10181C #define APMAP_BOOTPATH (1U << 31) #define XUSB_CSB_MEMPOOL_ILOAD_ATTR 0x101A00 #define XUSB_CSB_MEMPOOL_ILOAD_BASE_LO 0x101A04 #define XUSB_CSB_MEMPOOL_ILOAD_BASE_HI 0x101A08 #define XUSB_CSB_MEMPOOL_L2IMEMOP_SIZE 0x101A10 
#define L2IMEMOP_SIZE_OFFSET(x) (((x) & 0x3FF) << 8) #define L2IMEMOP_SIZE_SIZE(x) (((x) & 0x0FF) << 24) #define XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG 0x101A14 #define L2IMEMOP_INVALIDATE_ALL (0x40 << 24) #define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << 24) #define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101A18 #define L2IMEMOP_RESULT_VLD (1U << 31) #define XUSB_CSB_IMEM_BLOCK_SIZE 256 #define TEGRA_XHCI_SS_HIGH_SPEED 120000000 #define TEGRA_XHCI_SS_LOW_SPEED 12000000 /* MBOX commands. */ #define MBOX_CMD_MSG_ENABLED 1 #define MBOX_CMD_INC_FALC_CLOCK 2 #define MBOX_CMD_DEC_FALC_CLOCK 3 #define MBOX_CMD_INC_SSPI_CLOCK 4 #define MBOX_CMD_DEC_SSPI_CLOCK 5 #define MBOX_CMD_SET_BW 6 #define MBOX_CMD_SET_SS_PWR_GATING 7 #define MBOX_CMD_SET_SS_PWR_UNGATING 8 #define MBOX_CMD_SAVE_DFE_CTLE_CTX 9 #define MBOX_CMD_AIRPLANE_MODE_ENABLED 10 #define MBOX_CMD_AIRPLANE_MODE_DISABLED 11 #define MBOX_CMD_START_HSIC_IDLE 12 #define MBOX_CMD_STOP_HSIC_IDLE 13 #define MBOX_CMD_DBC_WAKE_STACK 14 #define MBOX_CMD_HSIC_PRETEND_CONNECT 15 #define MBOX_CMD_RESET_SSPI 16 #define MBOX_CMD_DISABLE_SS_LFPS_DETECTION 17 #define MBOX_CMD_ENABLE_SS_LFPS_DETECTION 18 /* MBOX responses. 
*/ #define MBOX_CMD_ACK (0x80 + 0) #define MBOX_CMD_NAK (0x80 + 1) #define IPFS_WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res_ipfs, (_r), (_v)) #define IPFS_RD4(_sc, _r) bus_read_4((_sc)->mem_res_ipfs, (_r)) #define FPCI_WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res_fpci, (_r), (_v)) #define FPCI_RD4(_sc, _r) bus_read_4((_sc)->mem_res_fpci, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "tegra_xhci", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_xhci", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) struct tegra_xusb_fw_hdr { uint32_t boot_loadaddr_in_imem; uint32_t boot_codedfi_offset; uint32_t boot_codetag; uint32_t boot_codesize; uint32_t phys_memaddr; uint16_t reqphys_memsize; uint16_t alloc_phys_memsize; uint32_t rodata_img_offset; uint32_t rodata_section_start; uint32_t rodata_section_end; uint32_t main_fnaddr; uint32_t fwimg_cksum; uint32_t fwimg_created_time; uint32_t imem_resident_start; uint32_t imem_resident_end; uint32_t idirect_start; uint32_t idirect_end; uint32_t l2_imem_start; uint32_t l2_imem_end; uint32_t version_id; uint8_t init_ddirect; uint8_t reserved[3]; uint32_t phys_addr_log_buffer; uint32_t total_log_entries; uint32_t dequeue_ptr; uint32_t dummy[2]; uint32_t fwimg_len; uint8_t magic[8]; uint32_t ss_low_power_entry_timeout; uint8_t num_hsic_port; uint8_t ss_portmap; uint8_t build; uint8_t padding[137]; /* Pad to 256 bytes */ }; struct xhci_soc; struct tegra_xhci_softc { struct xhci_softc xhci_softc; device_t dev; struct xhci_soc *soc; struct mtx mtx; struct resource *mem_res_fpci; struct resource *mem_res_ipfs; struct resource *irq_res_mbox; void *irq_hdl_mbox; clk_t clk_xusb_host; clk_t clk_xusb_gate; clk_t clk_xusb_falcon_src; clk_t clk_xusb_ss; clk_t clk_xusb_hs_src; 
clk_t clk_xusb_fs_src; hwreset_t hwreset_xusb_host; hwreset_t hwreset_xusb_ss; regulator_t regulators[16]; /* Safe maximum */ phy_t phys[8]; /* Safe maximum */ struct intr_config_hook irq_hook; bool xhci_inited; void *fw_vaddr; vm_size_t fw_size; }; struct xhci_soc { char *fw_name; char **regulator_names; char **phy_names; }; /* Tegra 124 config */ static char *tegra124_reg_names[] = { "avddio-pex-supply", "dvddio-pex-supply", "avdd-usb-supply", "avdd-pll-utmip-supply", "avdd-pll-erefe-supply", "avdd-usb-ss-pll-supply", "hvdd-usb-ss-supply", "hvdd-usb-ss-pll-e-supply", NULL }; static char *tegra124_phy_names[] = { "usb2-0", "usb2-1", "usb2-2", "usb3-0", NULL }; static struct xhci_soc tegra124_soc = { .fw_name = "tegra124_xusb_fw", .regulator_names = tegra124_reg_names, .phy_names = tegra124_phy_names, }; /* Tegra 210 config */ static char *tegra210_reg_names[] = { "dvddio-pex-supply", "hvddio-pex-supply", "avdd-usb-supply", "avdd-pll-utmip-supply", "avdd-pll-uerefe-supply", "dvdd-usb-ss-pll-supply", "hvdd-usb-ss-pll-e-supply", NULL }; static char *tegra210_phy_names[] = { "usb2-0", "usb2-1", "usb2-2", "usb2-3", "usb3-0", "usb3-1", NULL }; static struct xhci_soc tegra210_soc = { .fw_name = "tegra210_xusb_fw", .regulator_names = tegra210_reg_names, .phy_names = tegra210_phy_names, }; /* Compatible devices. */ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-xusb", (uintptr_t)&tegra124_soc}, {"nvidia,tegra210-xusb", (uintptr_t)&tegra210_soc}, {NULL, 0} }; static uint32_t CSB_RD4(struct tegra_xhci_softc *sc, uint32_t addr) { FPCI_WR4(sc, XUSB_CFG_ARU_C11_CSBRANGE, ARU_C11_CSBRANGE_PAGE(addr)); return (FPCI_RD4(sc, ARU_C11_CSBRANGE_ADDR(addr))); } static void CSB_WR4(struct tegra_xhci_softc *sc, uint32_t addr, uint32_t val) { FPCI_WR4(sc, XUSB_CFG_ARU_C11_CSBRANGE, ARU_C11_CSBRANGE_PAGE(addr)); FPCI_WR4(sc, ARU_C11_CSBRANGE_ADDR(addr), val); } static int get_fdt_resources(struct tegra_xhci_softc *sc, phandle_t node) { int i, rv; /* Regulators. 
*/ for (i = 0; sc->soc->regulator_names[i] != NULL; i++) { if (i >= nitems(sc->regulators)) { device_printf(sc->dev, "Too many regulators present in DT.\n"); return (EOVERFLOW); } rv = regulator_get_by_ofw_property(sc->dev, 0, sc->soc->regulator_names[i], sc->regulators + i); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' regulator\n", sc->soc->regulator_names[i]); return (ENXIO); } } rv = hwreset_get_by_ofw_name(sc->dev, 0, "xusb_host", &sc->hwreset_xusb_host); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_host' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "xusb_ss", &sc->hwreset_xusb_ss); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_ss' reset\n"); return (ENXIO); } /* Phys. */ for (i = 0; sc->soc->phy_names[i] != NULL; i++) { if (i >= nitems(sc->phys)) { device_printf(sc->dev, "Too many phys present in DT.\n"); return (EOVERFLOW); } rv = phy_get_by_ofw_name(sc->dev, 0, sc->soc->phy_names[i], sc->phys + i); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev, "Cannot get '%s' phy.\n", sc->soc->phy_names[i]); return (ENXIO); } } rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_host", &sc->clk_xusb_host); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_host' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_falcon_src", &sc->clk_xusb_falcon_src); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_falcon_src' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_ss", &sc->clk_xusb_ss); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_ss' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_hs_src", &sc->clk_xusb_hs_src); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_hs_src' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_fs_src", &sc->clk_xusb_fs_src); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_fs_src' clock\n"); return (ENXIO); } /* Clock xusb_gate is missing in mainstream DT */ rv = 
clk_get_by_name(sc->dev, "xusb_gate", &sc->clk_xusb_gate); if (rv != 0) { device_printf(sc->dev, "Cannot get 'xusb_gate' clock\n"); return (ENXIO); } return (0); } static int enable_fdt_resources(struct tegra_xhci_softc *sc) { int i, rv; rv = hwreset_assert(sc->hwreset_xusb_host); if (rv != 0) { device_printf(sc->dev, "Cannot reset 'xusb_host' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_xusb_ss); if (rv != 0) { device_printf(sc->dev, "Cannot reset 'xusb_ss' reset\n"); return (rv); } /* Regulators. */ for (i = 0; i < nitems(sc->regulators); i++) { if (sc->regulators[i] == NULL) continue; rv = regulator_enable(sc->regulators[i]); if (rv != 0) { device_printf(sc->dev, "Cannot enable '%s' regulator\n", sc->soc->regulator_names[i]); return (rv); } } /* Power off XUSB host and XUSB SS domains. */ rv = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA); if (rv != 0) { device_printf(sc->dev, "Cannot powerdown 'xusba' domain\n"); return (rv); } rv = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC); if (rv != 0) { device_printf(sc->dev, "Cannot powerdown 'xusbc' domain\n"); return (rv); } /* Setup XUSB ss_src clock first */ clk_set_freq(sc->clk_xusb_ss, TEGRA_XHCI_SS_HIGH_SPEED, 0); if (rv != 0) return (rv); /* The XUSB gate clock must be enabled before XUSBA can be powered. */ rv = clk_enable(sc->clk_xusb_gate); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'xusb_gate' clock\n"); return (rv); } /* Power on XUSB host and XUSB SS domains. 
*/ rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC, sc->clk_xusb_host, sc->hwreset_xusb_host); if (rv != 0) { device_printf(sc->dev, "Cannot powerup 'xusbc' domain\n"); return (rv); } rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA, sc->clk_xusb_ss, sc->hwreset_xusb_ss); if (rv != 0) { device_printf(sc->dev, "Cannot powerup 'xusba' domain\n"); return (rv); } /* Enable rest of clocks */ rv = clk_enable(sc->clk_xusb_falcon_src); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'xusb_falcon_src' clock\n"); return (rv); } rv = clk_enable(sc->clk_xusb_fs_src); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'xusb_fs_src' clock\n"); return (rv); } rv = clk_enable(sc->clk_xusb_hs_src); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'xusb_hs_src' clock\n"); return (rv); } /* Phys. */ for (i = 0; i < nitems(sc->phys); i++) { if (sc->phys[i] == NULL) continue; rv = phy_enable(sc->phys[i]); if (rv != 0) { device_printf(sc->dev, "Cannot enable '%s' phy\n", sc->soc->phy_names[i]); return (rv); } } return (0); } /* Respond by ACK/NAK back to FW */ static void mbox_send_ack(struct tegra_xhci_softc *sc, uint32_t cmd, uint32_t data) { uint32_t reg; reg = ARU_MAILBOX_DATA_IN_TYPE(cmd) | ARU_MAILBOX_DATA_IN_DATA(data); FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_IN, reg); reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD); reg |= ARU_MAILBOX_CMD_DEST_FALC | ARU_MAILBOX_CMD_INT_EN; FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg); } /* Sent command to FW */ static int mbox_send_cmd(struct tegra_xhci_softc *sc, uint32_t cmd, uint32_t data) { uint32_t reg; int i; reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER); if (reg != ARU_MAILBOX_OWNER_NONE) { device_printf(sc->dev, "CPU mailbox is busy: 0x%08X\n", reg); return (EBUSY); } /* XXX Is this right? Retry loop? Wait before send? 
*/ FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER, ARU_MAILBOX_OWNER_SW); reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER); if (reg != ARU_MAILBOX_OWNER_SW) { device_printf(sc->dev, "Cannot acquire CPU mailbox: 0x%08X\n", reg); return (EBUSY); } reg = ARU_MAILBOX_DATA_IN_TYPE(cmd) | ARU_MAILBOX_DATA_IN_DATA(data); FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_IN, reg); reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD); reg |= ARU_MAILBOX_CMD_DEST_FALC | ARU_MAILBOX_CMD_INT_EN; FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg); for (i = 250; i > 0; i--) { reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER); if (reg == ARU_MAILBOX_OWNER_NONE) break; DELAY(100); } if (i <= 0) { device_printf(sc->dev, "Command response timeout: 0x%08X\n", reg); return (ETIMEDOUT); } return(0); } static void process_msg(struct tegra_xhci_softc *sc, uint32_t req_cmd, uint32_t req_data, uint32_t *resp_cmd, uint32_t *resp_data) { uint64_t freq; int rv; /* In most cases, data are echoed back. */ *resp_data = req_data; switch (req_cmd) { case MBOX_CMD_INC_FALC_CLOCK: case MBOX_CMD_DEC_FALC_CLOCK: rv = clk_set_freq(sc->clk_xusb_falcon_src, req_data * 1000ULL, 0); if (rv == 0) { rv = clk_get_freq(sc->clk_xusb_falcon_src, &freq); *resp_data = (uint32_t)(freq / 1000); } *resp_cmd = rv == 0 ? MBOX_CMD_ACK: MBOX_CMD_NAK; break; case MBOX_CMD_INC_SSPI_CLOCK: case MBOX_CMD_DEC_SSPI_CLOCK: rv = clk_set_freq(sc->clk_xusb_ss, req_data * 1000ULL, 0); if (rv == 0) { rv = clk_get_freq(sc->clk_xusb_ss, &freq); *resp_data = (uint32_t)(freq / 1000); } *resp_cmd = rv == 0 ? MBOX_CMD_ACK: MBOX_CMD_NAK; break; case MBOX_CMD_SET_BW: /* No respense is expected. */ *resp_cmd = 0; break; case MBOX_CMD_SET_SS_PWR_GATING: case MBOX_CMD_SET_SS_PWR_UNGATING: *resp_cmd = MBOX_CMD_NAK; break; case MBOX_CMD_SAVE_DFE_CTLE_CTX: /* Not implemented yet. */ *resp_cmd = MBOX_CMD_ACK; break; case MBOX_CMD_START_HSIC_IDLE: case MBOX_CMD_STOP_HSIC_IDLE: /* Not implemented yet. 
*/ *resp_cmd = MBOX_CMD_NAK; break; case MBOX_CMD_DISABLE_SS_LFPS_DETECTION: case MBOX_CMD_ENABLE_SS_LFPS_DETECTION: /* Not implemented yet. */ *resp_cmd = MBOX_CMD_NAK; break; case MBOX_CMD_AIRPLANE_MODE_ENABLED: case MBOX_CMD_AIRPLANE_MODE_DISABLED: case MBOX_CMD_DBC_WAKE_STACK: case MBOX_CMD_HSIC_PRETEND_CONNECT: case MBOX_CMD_RESET_SSPI: device_printf(sc->dev, "Received unused/unexpected command: %u\n", req_cmd); *resp_cmd = 0; break; default: device_printf(sc->dev, "Received unknown command: %u\n", req_cmd); } } static void intr_mbox(void *arg) { struct tegra_xhci_softc *sc; uint32_t reg, msg, resp_cmd, resp_data; sc = (struct tegra_xhci_softc *)arg; /* Clear interrupt first */ reg = FPCI_RD4(sc, XUSB_CFG_ARU_SMI_INTR); FPCI_WR4(sc, XUSB_CFG_ARU_SMI_INTR, reg); if (reg & ARU_SMI_INTR_FW_HANG) { device_printf(sc->dev, "XUSB CPU firmware hang!!! CPUCTL: 0x%08X\n", CSB_RD4(sc, XUSB_FALCON_CPUCTL)); } msg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_OUT); resp_cmd = 0; process_msg(sc, ARU_MAILBOX_DATA_OUT_TYPE(msg), ARU_MAILBOX_DATA_OUT_DATA(msg), &resp_cmd, &resp_data); if (resp_cmd != 0) mbox_send_ack(sc, resp_cmd, resp_data); else FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER, ARU_MAILBOX_OWNER_NONE); reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD); reg &= ~ARU_MAILBOX_CMD_DEST_SMI; FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg); } static int load_fw(struct tegra_xhci_softc *sc) { const struct firmware *fw; const struct tegra_xusb_fw_hdr *fw_hdr; vm_paddr_t fw_paddr, fw_base; void *fw_vaddr; vm_size_t fw_size; uint32_t code_tags, code_size; struct clocktime fw_clock; struct timespec fw_timespec; int i; /* Reset ARU */ FPCI_WR4(sc, XUSB_CFG_ARU_RST, ARU_RST_RESET); DELAY(3000); /* Check if FALCON already runs */ if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO) != 0) { device_printf(sc->dev, "XUSB CPU is already loaded, CPUCTL: 0x%08X\n", CSB_RD4(sc, XUSB_FALCON_CPUCTL)); return (0); } fw = firmware_get(sc->soc->fw_name); if (fw == NULL) { device_printf(sc->dev, "Cannot 
read xusb firmware\n"); return (ENOENT); } /* Allocate uncached memory and copy firmware into. */ fw_hdr = (const struct tegra_xusb_fw_hdr *)fw->data; fw_size = fw_hdr->fwimg_len; fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE); fw_paddr = vtophys((uintptr_t)fw_vaddr); fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr; memcpy(fw_vaddr, fw->data, fw_size); firmware_put(fw, FIRMWARE_UNLOAD); sc->fw_vaddr = fw_vaddr; sc->fw_size = fw_size; /* Setup firmware physical address and size. */ fw_base = fw_paddr + sizeof(*fw_hdr); CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_ATTR, fw_size); CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO, fw_base & 0xFFFFFFFF); CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_HI, (uint64_t)fw_base >> 32); CSB_WR4(sc, XUSB_CSB_MEMPOOL_APMAP, APMAP_BOOTPATH); /* Invalidate full L2IMEM context. */ CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG, L2IMEMOP_INVALIDATE_ALL); /* Program load of L2IMEM by boot code. */ code_tags = howmany(fw_hdr->boot_codetag, XUSB_CSB_IMEM_BLOCK_SIZE); code_size = howmany(fw_hdr->boot_codesize, XUSB_CSB_IMEM_BLOCK_SIZE); CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_SIZE, L2IMEMOP_SIZE_OFFSET(code_tags) | L2IMEMOP_SIZE_SIZE(code_size)); /* Execute L2IMEM boot code fetch. 
*/ CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG, L2IMEMOP_LOAD_LOCKED_RESULT); /* Program FALCON auto-fill range and block count */ CSB_WR4(sc, XUSB_FALCON_IMFILLCTL, code_size); CSB_WR4(sc, XUSB_FALCON_IMFILLRNG1, IMFILLRNG1_TAG_LO(code_tags) | IMFILLRNG1_TAG_HI(code_tags + code_size)); CSB_WR4(sc, XUSB_FALCON_DMACTL, 0); /* Wait for CPU */ for (i = 500; i > 0; i--) { if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT) & L2IMEMOP_RESULT_VLD) break; DELAY(100); } if (i <= 0) { device_printf(sc->dev, "Timedout while wating for DMA, " "state: 0x%08X\n", CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT)); return (ETIMEDOUT); } /* Boot FALCON cpu */ CSB_WR4(sc, XUSB_FALCON_BOOTVEC, fw_hdr->boot_codetag); CSB_WR4(sc, XUSB_FALCON_CPUCTL, CPUCTL_STARTCPU); /* Wait for CPU */ for (i = 50; i > 0; i--) { if (CSB_RD4(sc, XUSB_FALCON_CPUCTL) == CPUCTL_STOPPED) break; DELAY(100); } if (i <= 0) { device_printf(sc->dev, "Timedout while wating for FALCON cpu, " "state: 0x%08X\n", CSB_RD4(sc, XUSB_FALCON_CPUCTL)); return (ETIMEDOUT); } fw_timespec.tv_sec = fw_hdr->fwimg_created_time; fw_timespec.tv_nsec = 0; clock_ts_to_ct(&fw_timespec, &fw_clock); device_printf(sc->dev, " Falcon firmware version: %02X.%02X.%04X," " (%d/%d/%d %d:%02d:%02d UTC)\n", (fw_hdr->version_id >> 24) & 0xFF,(fw_hdr->version_id >> 15) & 0xFF, fw_hdr->version_id & 0xFFFF, fw_clock.day, fw_clock.mon, fw_clock.year, fw_clock.hour, fw_clock.min, fw_clock.sec); return (0); } static int init_hw(struct tegra_xhci_softc *sc) { int rv; uint32_t reg; rman_res_t base_addr; base_addr = rman_get_start(sc->xhci_softc.sc_io_res); /* Enable FPCI access */ reg = IPFS_RD4(sc, XUSB_HOST_CONFIGURATION); reg |= CONFIGURATION_EN_FPCI; IPFS_WR4(sc, XUSB_HOST_CONFIGURATION, reg); IPFS_RD4(sc, XUSB_HOST_CONFIGURATION); /* Program bar for XHCI base address */ reg = FPCI_RD4(sc, T_XUSB_CFG_4); reg &= ~CFG_4_BASE_ADDRESS(~0); reg |= CFG_4_BASE_ADDRESS((uint32_t)base_addr >> 15); FPCI_WR4(sc, T_XUSB_CFG_4, reg); FPCI_WR4(sc, T_XUSB_CFG_5, 
(uint32_t)((uint64_t)(base_addr) >> 32)); /* Enable bus master */ reg = FPCI_RD4(sc, T_XUSB_CFG_1); reg |= CFG_1_IO_SPACE; reg |= CFG_1_MEMORY_SPACE; reg |= CFG_1_BUS_MASTER; FPCI_WR4(sc, T_XUSB_CFG_1, reg); /* Enable Interrupts */ reg = IPFS_RD4(sc, XUSB_HOST_INTR_MASK); reg |= INTR_IP_INT_MASK; IPFS_WR4(sc, XUSB_HOST_INTR_MASK, reg); /* Set hysteresis */ IPFS_WR4(sc, XUSB_HOST_CLKGATE_HYSTERESIS, 128); rv = load_fw(sc); if (rv != 0) return rv; return (0); } static int tegra_xhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "Nvidia Tegra XHCI controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int tegra_xhci_detach(device_t dev) { struct tegra_xhci_softc *sc; struct xhci_softc *xsc; sc = device_get_softc(dev); xsc = &sc->xhci_softc; /* during module unload there are lots of children leftover */ device_delete_children(dev); if (sc->xhci_inited) { usb_callout_drain(&xsc->sc_callout); xhci_halt_controller(xsc); } if (xsc->sc_irq_res && xsc->sc_intr_hdl) { bus_teardown_intr(dev, xsc->sc_irq_res, xsc->sc_intr_hdl); xsc->sc_intr_hdl = NULL; } if (xsc->sc_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(xsc->sc_irq_res), xsc->sc_irq_res); xsc->sc_irq_res = NULL; } if (xsc->sc_io_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(xsc->sc_io_res), xsc->sc_io_res); xsc->sc_io_res = NULL; } if (sc->xhci_inited) xhci_uninit(xsc); if (sc->irq_hdl_mbox != NULL) bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox); if (sc->fw_vaddr != NULL) kmem_free(sc->fw_vaddr, sc->fw_size); LOCK_DESTROY(sc); return (0); } static int tegra_xhci_attach(device_t dev) { struct tegra_xhci_softc *sc; struct xhci_softc *xsc; int rv, rid; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; sc->soc = (struct xhci_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; node = ofw_bus_get_node(dev); xsc = &sc->xhci_softc; 
LOCK_INIT(sc); rv = get_fdt_resources(sc, node); if (rv != 0) { rv = ENXIO; goto error; } rv = enable_fdt_resources(sc); if (rv != 0) { rv = ENXIO; goto error; } /* Allocate resources. */ rid = 0; xsc->sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (xsc->sc_io_res == NULL) { device_printf(dev, "Could not allocate HCD memory resources\n"); rv = ENXIO; goto error; } rid = 1; sc->mem_res_fpci = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res_fpci == NULL) { device_printf(dev, "Could not allocate FPCI memory resources\n"); rv = ENXIO; goto error; } rid = 2; sc->mem_res_ipfs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res_ipfs == NULL) { device_printf(dev, "Could not allocate IPFS memory resources\n"); rv = ENXIO; goto error; } rid = 0; xsc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (xsc->sc_irq_res == NULL) { device_printf(dev, "Could not allocate HCD IRQ resources\n"); rv = ENXIO; goto error; } rid = 1; sc->irq_res_mbox = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res_mbox == NULL) { device_printf(dev, "Could not allocate MBOX IRQ resources\n"); rv = ENXIO; goto error; } rv = init_hw(sc); if (rv != 0) { device_printf(dev, "Could not initialize XUSB hardware\n"); goto error; } /* Wakeup and enable firmaware */ rv = mbox_send_cmd(sc, MBOX_CMD_MSG_ENABLED, 0); if (rv != 0) { device_printf(sc->dev, "Could not enable XUSB firmware\n"); goto error; } /* Fill data for XHCI driver. */ xsc->sc_bus.parent = dev; xsc->sc_bus.devices = xsc->sc_devices; xsc->sc_bus.devices_max = XHCI_MAX_DEVICES; xsc->sc_io_tag = rman_get_bustag(xsc->sc_io_res); xsc->sc_io_hdl = rman_get_bushandle(xsc->sc_io_res); xsc->sc_io_size = rman_get_size(xsc->sc_io_res); strlcpy(xsc->sc_vendor, "Nvidia", sizeof(xsc->sc_vendor)); /* Add USB bus device. 
*/ xsc->sc_bus.bdev = device_add_child(sc->dev, "usbus", -1); if (xsc->sc_bus.bdev == NULL) { device_printf(sc->dev, "Could not add USB device\n"); rv = ENXIO; goto error; } device_set_ivars(xsc->sc_bus.bdev, &xsc->sc_bus); device_set_desc(xsc->sc_bus.bdev, "Nvidia USB 3.0 controller"); rv = xhci_init(xsc, sc->dev, 1); if (rv != 0) { device_printf(sc->dev, "USB init failed: %d\n", rv); goto error; } sc->xhci_inited = true; rv = xhci_start_controller(xsc); if (rv != 0) { device_printf(sc->dev, "Could not start XHCI controller: %d\n", rv); goto error; } rv = bus_setup_intr(dev, sc->irq_res_mbox, INTR_TYPE_MISC | INTR_MPSAFE, NULL, intr_mbox, sc, &sc->irq_hdl_mbox); if (rv != 0) { device_printf(dev, "Could not setup error IRQ: %d\n",rv); xsc->sc_intr_hdl = NULL; goto error; } rv = bus_setup_intr(dev, xsc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)xhci_interrupt, xsc, &xsc->sc_intr_hdl); if (rv != 0) { device_printf(dev, "Could not setup error IRQ: %d\n",rv); xsc->sc_intr_hdl = NULL; goto error; } /* Probe the bus. 
*/ rv = device_probe_and_attach(xsc->sc_bus.bdev); if (rv != 0) { device_printf(sc->dev, "Could not initialize USB: %d\n", rv); goto error; } return (0); error: panic("XXXXX"); tegra_xhci_detach(dev); return (rv); } static device_method_t xhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_xhci_probe), DEVMETHOD(device_attach, tegra_xhci_attach), DEVMETHOD(device_detach, tegra_xhci_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD_END }; static DEFINE_CLASS_0(xhci, xhci_driver, xhci_methods, sizeof(struct tegra_xhci_softc)); DRIVER_MODULE(tegra_xhci, simplebus, xhci_driver, NULL, NULL); MODULE_DEPEND(tegra_xhci, usb, 1, 1, 1); diff --git a/sys/arm/ti/am335x/am335x_dmtimer.c b/sys/arm/ti/am335x/am335x_dmtimer.c index d6ea5267a9be..a4ca188bd83b 100644 --- a/sys/arm/ti/am335x/am335x_dmtimer.c +++ b/sys/arm/ti/am335x/am335x_dmtimer.c @@ -1,403 +1,403 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* For arm_set_delay */ -#include +#include #include #include #include #include #include "am335x_dmtreg.h" struct am335x_dmtimer_softc { device_t dev; int tmr_mem_rid; struct resource * tmr_mem_res; int tmr_irq_rid; struct resource * tmr_irq_res; void *tmr_irq_handler; clk_t clk_fck; uint64_t sysclk_freq; uint32_t tclr; /* Cached TCLR register. */ union { struct timecounter tc; struct eventtimer et; } func; int tmr_num; /* Hardware unit number. */ char tmr_name[12]; /* "DMTimerN", N = tmr_num */ }; static struct am335x_dmtimer_softc *am335x_dmtimer_et_sc = NULL; static struct am335x_dmtimer_softc *am335x_dmtimer_tc_sc = NULL; static void am335x_dmtimer_delay(int, void *); /* * We use dmtimer2 for eventtimer and dmtimer3 for timecounter. */ #define ET_TMR_NUM 2 #define TC_TMR_NUM 3 /* List of compatible strings for FDT tree */ static struct ofw_compat_data compat_data[] = { {"ti,am335x-timer", 1}, {"ti,am335x-timer-1ms", 1}, {NULL, 0}, }; #define DMTIMER_READ4(sc, reg) bus_read_4((sc)->tmr_mem_res, (reg)) #define DMTIMER_WRITE4(sc, reg, val) bus_write_4((sc)->tmr_mem_res, (reg), (val)) static int am335x_dmtimer_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct am335x_dmtimer_softc *sc; uint32_t initial_count, reload_count; sc = et->et_priv; /* * Stop the timer before changing it. 
This routine will often be called * while the timer is still running, to either lengthen or shorten the * current event time. We need to ensure the timer doesn't expire while * we're working with it. * * Also clear any pending interrupt status, because it's at least * theoretically possible that we're running in a primary interrupt * context now, and a timer interrupt could be pending even before we * stopped the timer. The more likely case is that we're being called * from the et_event_cb() routine dispatched from our own handler, but * it's not clear to me that that's the only case possible. */ sc->tclr &= ~(DMT_TCLR_START | DMT_TCLR_AUTOLOAD); DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_OVF); if (period != 0) { reload_count = ((uint32_t)et->et_frequency * period) >> 32; sc->tclr |= DMT_TCLR_AUTOLOAD; } else { reload_count = 0; } if (first != 0) initial_count = ((uint32_t)et->et_frequency * first) >> 32; else initial_count = reload_count; /* * Set auto-reload and current-count values. This timer hardware counts * up from the initial/reload value and interrupts on the zero rollover. */ DMTIMER_WRITE4(sc, DMT_TLDR, 0xFFFFFFFF - reload_count); DMTIMER_WRITE4(sc, DMT_TCRR, 0xFFFFFFFF - initial_count); /* Enable overflow interrupt, and start the timer. */ DMTIMER_WRITE4(sc, DMT_IRQENABLE_SET, DMT_IRQ_OVF); sc->tclr |= DMT_TCLR_START; DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); return (0); } static int am335x_dmtimer_et_stop(struct eventtimer *et) { struct am335x_dmtimer_softc *sc; sc = et->et_priv; /* Stop timer, disable and clear interrupt. */ sc->tclr &= ~(DMT_TCLR_START | DMT_TCLR_AUTOLOAD); DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); DMTIMER_WRITE4(sc, DMT_IRQENABLE_CLR, DMT_IRQ_OVF); DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_OVF); return (0); } static int am335x_dmtimer_et_intr(void *arg) { struct am335x_dmtimer_softc *sc; sc = arg; /* Ack the interrupt, and invoke the callback if it's still enabled. 
*/ DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_OVF); if (sc->func.et.et_active) sc->func.et.et_event_cb(&sc->func.et, sc->func.et.et_arg); return (FILTER_HANDLED); } static int am335x_dmtimer_et_init(struct am335x_dmtimer_softc *sc) { KASSERT(am335x_dmtimer_et_sc == NULL, ("already have an eventtimer")); /* * Setup eventtimer interrupt handling. Panic if anything goes wrong, * because the system just isn't going to run without an eventtimer. */ sc->tmr_irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->tmr_irq_rid, RF_ACTIVE); if (sc->tmr_irq_res == NULL) panic("am335x_dmtimer: could not allocate irq resources"); if (bus_setup_intr(sc->dev, sc->tmr_irq_res, INTR_TYPE_CLK, am335x_dmtimer_et_intr, NULL, sc, &sc->tmr_irq_handler) != 0) panic("am335x_dmtimer: count not setup irq handler"); sc->func.et.et_name = sc->tmr_name; sc->func.et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->func.et.et_quality = 500; sc->func.et.et_frequency = sc->sysclk_freq; sc->func.et.et_min_period = ((0x00000005LLU << 32) / sc->func.et.et_frequency); sc->func.et.et_max_period = (0xfffffffeLLU << 32) / sc->func.et.et_frequency; sc->func.et.et_start = am335x_dmtimer_et_start; sc->func.et.et_stop = am335x_dmtimer_et_stop; sc->func.et.et_priv = sc; am335x_dmtimer_et_sc = sc; et_register(&sc->func.et); return (0); } static unsigned am335x_dmtimer_tc_get_timecount(struct timecounter *tc) { struct am335x_dmtimer_softc *sc; sc = tc->tc_priv; return (DMTIMER_READ4(sc, DMT_TCRR)); } static int am335x_dmtimer_tc_init(struct am335x_dmtimer_softc *sc) { KASSERT(am335x_dmtimer_tc_sc == NULL, ("already have a timecounter")); /* Set up timecounter, start it, register it. 
*/ DMTIMER_WRITE4(sc, DMT_TSICR, DMT_TSICR_RESET); while (DMTIMER_READ4(sc, DMT_TIOCP_CFG) & DMT_TIOCP_RESET) continue; sc->tclr |= DMT_TCLR_START | DMT_TCLR_AUTOLOAD; DMTIMER_WRITE4(sc, DMT_TLDR, 0); DMTIMER_WRITE4(sc, DMT_TCRR, 0); DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); sc->func.tc.tc_name = sc->tmr_name; sc->func.tc.tc_get_timecount = am335x_dmtimer_tc_get_timecount; sc->func.tc.tc_counter_mask = ~0u; sc->func.tc.tc_frequency = sc->sysclk_freq; sc->func.tc.tc_quality = 500; sc->func.tc.tc_priv = sc; am335x_dmtimer_tc_sc = sc; tc_init(&sc->func.tc); arm_set_delay(am335x_dmtimer_delay, sc); return (0); } static int am335x_dmtimer_probe(device_t dev) { char strbuf[32]; int tmr_num; uint64_t rev_address; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* * Get the hardware unit number from address of rev register. * If this isn't the hardware unit we're going to use for either the * eventtimer or the timecounter, no point in instantiating the device. */ rev_address = ti_sysc_get_rev_address(device_get_parent(dev)); switch (rev_address) { case DMTIMER2_REV: tmr_num = 2; break; case DMTIMER3_REV: tmr_num = 3; break; default: /* Not DMTIMER2 or DMTIMER3 */ return (ENXIO); } snprintf(strbuf, sizeof(strbuf), "AM335x DMTimer%d", tmr_num); device_set_desc_copy(dev, strbuf); return(BUS_PROBE_DEFAULT); } static int am335x_dmtimer_attach(device_t dev) { struct am335x_dmtimer_softc *sc; int err; uint64_t rev_address; clk_t sys_clkin; sc = device_get_softc(dev); sc->dev = dev; /* expect one clock */ err = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_fck); if (err != 0) { device_printf(dev, "Cant find clock index 0. 
err: %d\n", err); return (ENXIO); } err = clk_get_by_name(dev, "sys_clkin_ck@40", &sys_clkin); if (err != 0) { device_printf(dev, "Cant find sys_clkin_ck@40 err: %d\n", err); return (ENXIO); } /* Select M_OSC as DPLL parent */ err = clk_set_parent_by_clk(sc->clk_fck, sys_clkin); if (err != 0) { device_printf(dev, "Cant set mux to CLK_M_OSC\n"); return (ENXIO); } /* Enable clocks and power on the device. */ err = ti_sysc_clock_enable(device_get_parent(dev)); if (err != 0) { device_printf(dev, "Cant enable sysc clkctrl, err %d\n", err); return (ENXIO); } /* Get the base clock frequency. */ err = clk_get_freq(sc->clk_fck, &sc->sysclk_freq); if (err != 0) { device_printf(dev, "Cant get sysclk frequency, err %d\n", err); return (ENXIO); } /* Request the memory resources. */ sc->tmr_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->tmr_mem_rid, RF_ACTIVE); if (sc->tmr_mem_res == NULL) { return (ENXIO); } rev_address = ti_sysc_get_rev_address(device_get_parent(dev)); switch (rev_address) { case DMTIMER2_REV: sc->tmr_num = 2; break; case DMTIMER3_REV: sc->tmr_num = 3; break; default: device_printf(dev, "Not timer 2 or 3! %#jx\n", rev_address); return (ENXIO); } snprintf(sc->tmr_name, sizeof(sc->tmr_name), "DMTimer%d", sc->tmr_num); /* * Go set up either a timecounter or eventtimer. We wouldn't have * attached if we weren't one or the other. 
*/ if (sc->tmr_num == ET_TMR_NUM) am335x_dmtimer_et_init(sc); else if (sc->tmr_num == TC_TMR_NUM) am335x_dmtimer_tc_init(sc); else panic("am335x_dmtimer: bad timer number %d", sc->tmr_num); return (0); } static device_method_t am335x_dmtimer_methods[] = { DEVMETHOD(device_probe, am335x_dmtimer_probe), DEVMETHOD(device_attach, am335x_dmtimer_attach), { 0, 0 } }; static driver_t am335x_dmtimer_driver = { "am335x_dmtimer", am335x_dmtimer_methods, sizeof(struct am335x_dmtimer_softc), }; DRIVER_MODULE(am335x_dmtimer, simplebus, am335x_dmtimer_driver, 0, 0); MODULE_DEPEND(am335x_dmtimer, ti_sysc, 1, 1, 1); static void am335x_dmtimer_delay(int usec, void *arg) { struct am335x_dmtimer_softc *sc = arg; int32_t counts; uint32_t first, last; /* Get the number of times to count */ counts = (usec + 1) * (sc->sysclk_freq / 1000000); first = DMTIMER_READ4(sc, DMT_TCRR); while (counts > 0) { last = DMTIMER_READ4(sc, DMT_TCRR); if (last > first) { counts -= (int32_t)(last - first); } else { counts -= (int32_t)((0xFFFFFFFF - first) + last); } first = last; } } diff --git a/sys/arm/ti/am335x/am335x_dmtpps.c b/sys/arm/ti/am335x/am335x_dmtpps.c index 23b930741c2a..f3e4386e4837 100644 --- a/sys/arm/ti/am335x/am335x_dmtpps.c +++ b/sys/arm/ti/am335x/am335x_dmtpps.c @@ -1,617 +1,617 @@ /*- * Copyright (c) 2015 Ian lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * AM335x PPS driver using DMTimer capture. * * Note that this PPS driver does not use an interrupt. Instead it uses the * hardware's ability to latch the timer's count register in response to a * signal on an IO pin. Each of timers 4-7 have an associated pin, and this * code allows any one of those to be used. * * The timecounter routines in kern_tc.c call the pps poll routine periodically * to see if a new counter value has been latched. When a new value has been * latched, the only processing done in the poll routine is to capture the * current set of timecounter timehands (done with pps_capture()) and the * latched value from the timer. The remaining work (done by pps_event() while * holding a mutex) is scheduled to be done later in a non-interrupt context. 
*/ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "am335x_dmtreg.h" #define PPS_CDEV_NAME "dmtpps" struct dmtpps_softc { device_t dev; int mem_rid; struct resource * mem_res; int tmr_num; /* N from hwmod str "timerN" */ char tmr_name[12]; /* "DMTimerN" */ uint32_t tclr; /* Cached TCLR register. */ struct timecounter tc; int pps_curmode; /* Edge mode now set in hw. */ struct cdev * pps_cdev; struct pps_state pps_state; struct mtx pps_mtx; clk_t clk_fck; uint64_t sysclk_freq; }; static int dmtpps_tmr_num; /* Set by probe() */ /* List of compatible strings for FDT tree */ static struct ofw_compat_data compat_data[] = { {"ti,am335x-timer", 1}, {"ti,am335x-timer-1ms", 1}, {NULL, 0}, }; SIMPLEBUS_PNP_INFO(compat_data); /* * A table relating pad names to the hardware timer number they can be mux'd to. */ struct padinfo { char * ballname; int tmr_num; }; static struct padinfo dmtpps_padinfo[] = { {"GPMC_ADVn_ALE", 4}, {"I2C0_SDA", 4}, {"MII1_TX_EN", 4}, {"XDMA_EVENT_INTR0", 4}, {"GPMC_BEn0_CLE", 5}, {"MDC", 5}, {"MMC0_DAT3", 5}, {"UART1_RTSn", 5}, {"GPMC_WEn", 6}, {"MDIO", 6}, {"MMC0_DAT2", 6}, {"UART1_CTSn", 6}, {"GPMC_OEn_REn", 7}, {"I2C0_SCL", 7}, {"UART0_CTSn", 7}, {"XDMA_EVENT_INTR1", 7}, {NULL, 0} }; /* * This is either brilliantly user-friendly, or utterly lame... * * The am335x chip is used on the popular Beaglebone boards. Those boards have * pins for all four capture-capable timers available on the P8 header. Allow * users to configure the input pin by giving the name of the header pin. 
*/ struct nicknames { const char * nick; const char * name; }; static struct nicknames dmtpps_pin_nicks[] = { {"P8-7", "GPMC_ADVn_ALE"}, {"P8-9", "GPMC_BEn0_CLE"}, {"P8-10", "GPMC_WEn"}, {"P8-8", "GPMC_OEn_REn",}, {NULL, NULL} }; #define DMTIMER_READ4(sc, reg) bus_read_4((sc)->mem_res, (reg)) #define DMTIMER_WRITE4(sc, reg, val) bus_write_4((sc)->mem_res, (reg), (val)) /* * Translate a short friendly case-insensitive name to its canonical name. */ static const char * dmtpps_translate_nickname(const char *nick) { struct nicknames *nn; for (nn = dmtpps_pin_nicks; nn->nick != NULL; nn++) if (strcasecmp(nick, nn->nick) == 0) return nn->name; return (nick); } /* * See if our tunable is set to the name of the input pin. If not, that's NOT * an error, return 0. If so, try to configure that pin as a timer capture * input pin, and if that works, then we have our timer unit number and if it * fails that IS an error, return -1. */ static int dmtpps_find_tmr_num_by_tunable(void) { struct padinfo *pi; char iname[20]; char muxmode[12]; const char * ballname; int err; if (!TUNABLE_STR_FETCH("hw.am335x_dmtpps.input", iname, sizeof(iname))) return (0); ballname = dmtpps_translate_nickname(iname); for (pi = dmtpps_padinfo; pi->ballname != NULL; pi++) { if (strcmp(ballname, pi->ballname) != 0) continue; snprintf(muxmode, sizeof(muxmode), "timer%d", pi->tmr_num); err = ti_pinmux_padconf_set(pi->ballname, muxmode, PADCONF_INPUT); if (err != 0) { printf("am335x_dmtpps: unable to configure capture pin " "for %s to input mode\n", muxmode); return (-1); } else if (bootverbose) { printf("am335x_dmtpps: configured pin %s as input " "for %s\n", iname, muxmode); } return (pi->tmr_num); } /* Invalid name in the tunable, that's an error. */ printf("am335x_dmtpps: unknown pin name '%s'\n", iname); return (-1); } /* * Ask the pinmux driver whether any pin has been configured as a TIMER4..TIMER7 * input pin. If so, return the timer number, if not return 0. 
*/ static int dmtpps_find_tmr_num_by_padconf(void) { int err; unsigned int padstate; const char * padmux; struct padinfo *pi; char muxmode[12]; for (pi = dmtpps_padinfo; pi->ballname != NULL; pi++) { err = ti_pinmux_padconf_get(pi->ballname, &padmux, &padstate); snprintf(muxmode, sizeof(muxmode), "timer%d", pi->tmr_num); if (err == 0 && (padstate & RXACTIVE) != 0 && strcmp(muxmode, padmux) == 0) return (pi->tmr_num); } /* Nothing found, not an error. */ return (0); } /* * Figure out which hardware timer number to use based on input pin * configuration. This is done just once, the first time probe() runs. */ static int dmtpps_find_tmr_num(void) { int tmr_num; if ((tmr_num = dmtpps_find_tmr_num_by_tunable()) == 0) tmr_num = dmtpps_find_tmr_num_by_padconf(); if (tmr_num <= 0) { printf("am335x_dmtpps: PPS driver not enabled: unable to find " "or configure a capture input pin\n"); tmr_num = -1; /* Must return non-zero to prevent re-probing. */ } return (tmr_num); } static void dmtpps_set_hw_capture(struct dmtpps_softc *sc, bool force_off) { int newmode; if (force_off) newmode = 0; else newmode = sc->pps_state.ppsparam.mode & PPS_CAPTUREASSERT; if (newmode == sc->pps_curmode) return; sc->pps_curmode = newmode; if (newmode == PPS_CAPTUREASSERT) sc->tclr |= DMT_TCLR_CAPTRAN_LOHI; else sc->tclr &= ~DMT_TCLR_CAPTRAN_MASK; DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); } static unsigned dmtpps_get_timecount(struct timecounter *tc) { struct dmtpps_softc *sc; sc = tc->tc_priv; return (DMTIMER_READ4(sc, DMT_TCRR)); } static void dmtpps_poll(struct timecounter *tc) { struct dmtpps_softc *sc; sc = tc->tc_priv; /* * If a new value has been latched we've got a PPS event. Capture the * timecounter data, then override the capcount field (pps_capture() * populates it from the current DMT_TCRR register) with the latched * value from the TCAR1 register. 
* * Note that we don't have the TCAR interrupt enabled, but the hardware * still provides the status bits in the "RAW" status register even when * they're masked from generating an irq. However, when clearing the * TCAR status to re-arm the capture for the next second, we have to * write to the IRQ status register, not the RAW register. Quirky. * * We do not need to hold a lock while capturing the pps data, because * it is captured into an area of the pps_state struct which is read * only by pps_event(). We do need to hold a lock while calling * pps_event(), because it manipulates data which is also accessed from * the ioctl(2) context by userland processes. */ if (DMTIMER_READ4(sc, DMT_IRQSTATUS_RAW) & DMT_IRQ_TCAR) { pps_capture(&sc->pps_state); sc->pps_state.capcount = DMTIMER_READ4(sc, DMT_TCAR1); DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_TCAR); mtx_lock_spin(&sc->pps_mtx); pps_event(&sc->pps_state, PPS_CAPTUREASSERT); mtx_unlock_spin(&sc->pps_mtx); } } static int dmtpps_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct dmtpps_softc *sc; sc = dev->si_drv1; /* * Begin polling for pps and enable capture in the hardware whenever the * device is open. Doing this stuff again is harmless if this isn't the * first open. */ sc->tc.tc_poll_pps = dmtpps_poll; dmtpps_set_hw_capture(sc, false); return 0; } static int dmtpps_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct dmtpps_softc *sc; sc = dev->si_drv1; /* * Stop polling and disable capture on last close. Use the force-off * flag to override the configured mode and turn off the hardware. */ sc->tc.tc_poll_pps = NULL; dmtpps_set_hw_capture(sc, true); return 0; } static int dmtpps_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct dmtpps_softc *sc; int err; sc = dev->si_drv1; /* Let the kernel do the heavy lifting for ioctl. 
*/ mtx_lock_spin(&sc->pps_mtx); err = pps_ioctl(cmd, data, &sc->pps_state); mtx_unlock_spin(&sc->pps_mtx); if (err != 0) return (err); /* * The capture mode could have changed, set the hardware to whatever * mode is now current. Effectively a no-op if nothing changed. */ dmtpps_set_hw_capture(sc, false); return (err); } static struct cdevsw dmtpps_cdevsw = { .d_version = D_VERSION, .d_open = dmtpps_open, .d_close = dmtpps_close, .d_ioctl = dmtpps_ioctl, .d_name = PPS_CDEV_NAME, }; static int dmtpps_probe(device_t dev) { char strbuf[64]; int tmr_num; uint64_t rev_address; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* * If we haven't chosen which hardware timer to use yet, go do that now. * We need to know that to decide whether to return success for this * hardware timer instance or not. */ if (dmtpps_tmr_num == 0) dmtpps_tmr_num = dmtpps_find_tmr_num(); /* * Figure out which hardware timer is being probed and see if it matches * the configured timer number determined earlier. */ rev_address = ti_sysc_get_rev_address(device_get_parent(dev)); switch (rev_address) { case DMTIMER1_1MS_REV: tmr_num = 1; break; case DMTIMER2_REV: tmr_num = 2; break; case DMTIMER3_REV: tmr_num = 3; break; case DMTIMER4_REV: tmr_num = 4; break; case DMTIMER5_REV: tmr_num = 5; break; case DMTIMER6_REV: tmr_num = 6; break; case DMTIMER7_REV: tmr_num = 7; break; default: return (ENXIO); } if (dmtpps_tmr_num != tmr_num) return (ENXIO); snprintf(strbuf, sizeof(strbuf), "AM335x PPS-Capture DMTimer%d", tmr_num); device_set_desc_copy(dev, strbuf); return(BUS_PROBE_DEFAULT); } static int dmtpps_attach(device_t dev) { struct dmtpps_softc *sc; struct make_dev_args mda; int err; clk_t sys_clkin; uint64_t rev_address; sc = device_get_softc(dev); sc->dev = dev; /* Figure out which hardware timer this is and set the name string. 
*/ rev_address = ti_sysc_get_rev_address(device_get_parent(dev)); switch (rev_address) { case DMTIMER1_1MS_REV: sc->tmr_num = 1; break; case DMTIMER2_REV: sc->tmr_num = 2; break; case DMTIMER3_REV: sc->tmr_num = 3; break; case DMTIMER4_REV: sc->tmr_num = 4; break; case DMTIMER5_REV: sc->tmr_num = 5; break; case DMTIMER6_REV: sc->tmr_num = 6; break; case DMTIMER7_REV: sc->tmr_num = 7; break; } snprintf(sc->tmr_name, sizeof(sc->tmr_name), "DMTimer%d", sc->tmr_num); /* expect one clock */ err = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_fck); if (err != 0) { device_printf(dev, "Cant find clock index 0. err: %d\n", err); return (ENXIO); } err = clk_get_by_name(dev, "sys_clkin_ck@40", &sys_clkin); if (err != 0) { device_printf(dev, "Cant find sys_clkin_ck@40 err: %d\n", err); return (ENXIO); } /* Select M_OSC as DPLL parent */ err = clk_set_parent_by_clk(sc->clk_fck, sys_clkin); if (err != 0) { device_printf(dev, "Cant set mux to CLK_M_OSC\n"); return (ENXIO); } /* Enable clocks and power on the device. */ err = ti_sysc_clock_enable(device_get_parent(dev)); if (err != 0) { device_printf(dev, "Cant enable sysc clkctrl, err %d\n", err); return (ENXIO); } /* Get the base clock frequency. */ err = clk_get_freq(sc->clk_fck, &sc->sysclk_freq); if (err != 0) { device_printf(dev, "Cant get sysclk frequency, err %d\n", err); return (ENXIO); } /* Request the memory resources. */ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { return (ENXIO); } /* * Configure the timer pulse/capture pin to input/capture mode. This is * required in addition to configuring the pin as input with the pinmux * controller (which was done via fdt data or tunable at probe time). */ sc->tclr = DMT_TCLR_GPO_CFG; DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); /* Set up timecounter hardware, start it. 
*/ DMTIMER_WRITE4(sc, DMT_TSICR, DMT_TSICR_RESET); while (DMTIMER_READ4(sc, DMT_TIOCP_CFG) & DMT_TIOCP_RESET) continue; sc->tclr |= DMT_TCLR_START | DMT_TCLR_AUTOLOAD; DMTIMER_WRITE4(sc, DMT_TLDR, 0); DMTIMER_WRITE4(sc, DMT_TCRR, 0); DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); /* Register the timecounter. */ sc->tc.tc_name = sc->tmr_name; sc->tc.tc_get_timecount = dmtpps_get_timecount; sc->tc.tc_counter_mask = ~0u; sc->tc.tc_frequency = sc->sysclk_freq; sc->tc.tc_quality = 1000; sc->tc.tc_priv = sc; tc_init(&sc->tc); /* * Indicate our PPS capabilities. Have the kernel init its part of the * pps_state struct and add its capabilities. * * While the hardware has a mode to capture each edge, it's not clear we * can use it that way, because there's only a single interrupt/status * bit to say something was captured, but not which edge it was. For * now, just say we can only capture assert events (the positive-going * edge of the pulse). */ mtx_init(&sc->pps_mtx, "dmtpps", NULL, MTX_SPIN); sc->pps_state.flags = PPSFLAG_MTX_SPIN; sc->pps_state.ppscap = PPS_CAPTUREASSERT; sc->pps_state.driver_abi = PPS_ABI_VERSION; sc->pps_state.driver_mtx = &sc->pps_mtx; pps_init_abi(&sc->pps_state); /* Create the PPS cdev. */ make_dev_args_init(&mda); mda.mda_flags = MAKEDEV_WAITOK; mda.mda_devsw = &dmtpps_cdevsw; mda.mda_cr = NULL; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_unit = device_get_unit(dev); mda.mda_si_drv1 = sc; if ((err = make_dev_s(&mda, &sc->pps_cdev, PPS_CDEV_NAME)) != 0) { device_printf(dev, "Failed to create cdev %s\n", PPS_CDEV_NAME); return (err); } if (bootverbose) device_printf(sc->dev, "Using %s for PPS device /dev/%s\n", sc->tmr_name, PPS_CDEV_NAME); return (0); } static int dmtpps_detach(device_t dev) { /* * There is no way to remove a timecounter once it has been registered, * even if it's not in use, so we can never detach. If we were * dynamically loaded as a module this will prevent unloading. 
*/ return (EBUSY); } static device_method_t dmtpps_methods[] = { DEVMETHOD(device_probe, dmtpps_probe), DEVMETHOD(device_attach, dmtpps_attach), DEVMETHOD(device_detach, dmtpps_detach), { 0, 0 } }; static driver_t dmtpps_driver = { "am335x_dmtpps", dmtpps_methods, sizeof(struct dmtpps_softc), }; DRIVER_MODULE(am335x_dmtpps, simplebus, dmtpps_driver, 0, 0); MODULE_DEPEND(am335x_dmtpps, ti_sysc, 1, 1, 1); diff --git a/sys/arm/ti/am335x/am335x_lcd.c b/sys/arm/ti/am335x/am335x_lcd.c index 82895436bdf1..9bef1df0c64c 100644 --- a/sys/arm/ti/am335x/am335x_lcd.c +++ b/sys/arm/ti/am335x/am335x_lcd.c @@ -1,1100 +1,1100 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_syscons.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #ifdef DEV_SC #include #else /* VT */ #include #endif #include #include #include "am335x_lcd.h" #include "am335x_pwm.h" #include "fb_if.h" #include "crtc_if.h" #define LCD_PID 0x00 #define LCD_CTRL 0x04 #define CTRL_DIV_MASK 0xff #define CTRL_DIV_SHIFT 8 #define CTRL_AUTO_UFLOW_RESTART (1 << 1) #define CTRL_RASTER_MODE 1 #define CTRL_LIDD_MODE 0 #define LCD_LIDD_CTRL 0x0C #define LCD_LIDD_CS0_CONF 0x10 #define LCD_LIDD_CS0_ADDR 0x14 #define LCD_LIDD_CS0_DATA 0x18 #define LCD_LIDD_CS1_CONF 0x1C #define LCD_LIDD_CS1_ADDR 0x20 #define LCD_LIDD_CS1_DATA 0x24 #define LCD_RASTER_CTRL 0x28 #define RASTER_CTRL_TFT24_UNPACKED (1 << 26) #define RASTER_CTRL_TFT24 (1 << 25) #define RASTER_CTRL_STN565 (1 << 24) #define RASTER_CTRL_TFTPMAP (1 << 23) #define RASTER_CTRL_NIBMODE (1 << 22) #define RASTER_CTRL_PALMODE_SHIFT 20 #define PALETTE_PALETTE_AND_DATA 0x00 #define PALETTE_PALETTE_ONLY 0x01 #define PALETTE_DATA_ONLY 0x02 #define RASTER_CTRL_REQDLY_SHIFT 12 #define RASTER_CTRL_MONO8B (1 << 9) #define RASTER_CTRL_RBORDER (1 << 8) #define RASTER_CTRL_LCDTFT (1 << 7) #define RASTER_CTRL_LCDBW (1 << 1) #define RASTER_CTRL_LCDEN (1 << 0) #define LCD_RASTER_TIMING_0 0x2C #define RASTER_TIMING_0_HBP_SHIFT 24 #define RASTER_TIMING_0_HFP_SHIFT 16 #define RASTER_TIMING_0_HSW_SHIFT 10 #define RASTER_TIMING_0_PPLLSB_SHIFT 4 #define RASTER_TIMING_0_PPLMSB_SHIFT 3 #define LCD_RASTER_TIMING_1 0x30 #define RASTER_TIMING_1_VBP_SHIFT 24 #define RASTER_TIMING_1_VFP_SHIFT 16 #define RASTER_TIMING_1_VSW_SHIFT 10 #define RASTER_TIMING_1_LPP_SHIFT 0 #define LCD_RASTER_TIMING_2 0x34 #define RASTER_TIMING_2_HSWHI_SHIFT 27 #define RASTER_TIMING_2_LPP_B10_SHIFT 26 #define RASTER_TIMING_2_PHSVS (1 << 25) #define 
RASTER_TIMING_2_PHSVS_RISE (1 << 24) #define RASTER_TIMING_2_PHSVS_FALL (0 << 24) #define RASTER_TIMING_2_IOE (1 << 23) #define RASTER_TIMING_2_IPC (1 << 22) #define RASTER_TIMING_2_IHS (1 << 21) #define RASTER_TIMING_2_IVS (1 << 20) #define RASTER_TIMING_2_ACBI_SHIFT 16 #define RASTER_TIMING_2_ACB_SHIFT 8 #define RASTER_TIMING_2_HBPHI_SHIFT 4 #define RASTER_TIMING_2_HFPHI_SHIFT 0 #define LCD_RASTER_SUBPANEL 0x38 #define LCD_RASTER_SUBPANEL2 0x3C #define LCD_LCDDMA_CTRL 0x40 #define LCDDMA_CTRL_DMA_MASTER_PRIO_SHIFT 16 #define LCDDMA_CTRL_TH_FIFO_RDY_SHIFT 8 #define LCDDMA_CTRL_BURST_SIZE_SHIFT 4 #define LCDDMA_CTRL_BYTES_SWAP (1 << 3) #define LCDDMA_CTRL_BE (1 << 1) #define LCDDMA_CTRL_FB0_ONLY 0 #define LCDDMA_CTRL_FB0_FB1 (1 << 0) #define LCD_LCDDMA_FB0_BASE 0x44 #define LCD_LCDDMA_FB0_CEILING 0x48 #define LCD_LCDDMA_FB1_BASE 0x4C #define LCD_LCDDMA_FB1_CEILING 0x50 #define LCD_SYSCONFIG 0x54 #define SYSCONFIG_STANDBY_FORCE (0 << 4) #define SYSCONFIG_STANDBY_NONE (1 << 4) #define SYSCONFIG_STANDBY_SMART (2 << 4) #define SYSCONFIG_IDLE_FORCE (0 << 2) #define SYSCONFIG_IDLE_NONE (1 << 2) #define SYSCONFIG_IDLE_SMART (2 << 2) #define LCD_IRQSTATUS_RAW 0x58 #define LCD_IRQSTATUS 0x5C #define LCD_IRQENABLE_SET 0x60 #define LCD_IRQENABLE_CLEAR 0x64 #define IRQ_EOF1 (1 << 9) #define IRQ_EOF0 (1 << 8) #define IRQ_PL (1 << 6) #define IRQ_FUF (1 << 5) #define IRQ_ACB (1 << 3) #define IRQ_SYNC_LOST (1 << 2) #define IRQ_RASTER_DONE (1 << 1) #define IRQ_FRAME_DONE (1 << 0) #define LCD_END_OF_INT_IND 0x68 #define LCD_CLKC_ENABLE 0x6C #define CLKC_ENABLE_DMA (1 << 2) #define CLKC_ENABLE_LDID (1 << 1) #define CLKC_ENABLE_CORE (1 << 0) #define LCD_CLKC_RESET 0x70 #define CLKC_RESET_MAIN (1 << 3) #define CLKC_RESET_DMA (1 << 2) #define CLKC_RESET_LDID (1 << 1) #define CLKC_RESET_CORE (1 << 0) #define LCD_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define LCD_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define LCD_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \ device_get_nameunit(_sc->sc_dev), 
"am335x_lcd", MTX_DEF) #define LCD_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx); #define LCD_READ4(_sc, reg) bus_read_4((_sc)->sc_mem_res, reg); #define LCD_WRITE4(_sc, reg, value) \ bus_write_4((_sc)->sc_mem_res, reg, value); /* Backlight is controlled by eCAS interface on PWM unit 0 */ #define PWM_UNIT 0 #define PWM_PERIOD 100 #define MODE_HBP(mode) ((mode)->htotal - (mode)->hsync_end) #define MODE_HFP(mode) ((mode)->hsync_start - (mode)->hdisplay) #define MODE_HSW(mode) ((mode)->hsync_end - (mode)->hsync_start) #define MODE_VBP(mode) ((mode)->vtotal - (mode)->vsync_end) #define MODE_VFP(mode) ((mode)->vsync_start - (mode)->vdisplay) #define MODE_VSW(mode) ((mode)->vsync_end - (mode)->vsync_start) #define MAX_PIXEL_CLOCK 126000 #define MAX_BANDWIDTH (1280*1024*60) struct am335x_lcd_softc { device_t sc_dev; struct fb_info sc_fb_info; struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_intr_hl; struct mtx sc_mtx; int sc_backlight; struct sysctl_oid *sc_oid; struct panel_info sc_panel; /* Framebuffer */ bus_dma_tag_t sc_dma_tag; bus_dmamap_t sc_dma_map; size_t sc_fb_size; bus_addr_t sc_fb_phys; uint8_t *sc_fb_base; /* HDMI framer */ phandle_t sc_hdmi_framer; eventhandler_tag sc_hdmi_evh; /* Clock */ clk_t sc_clk_dpll_disp_ck; }; static void am335x_fb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { bus_addr_t *addr; if (err) return; addr = (bus_addr_t*)arg; *addr = segs[0].ds_addr; } static uint32_t am335x_lcd_calc_divisor(uint32_t reference, uint32_t freq) { uint32_t div, i; uint32_t delta, min_delta; min_delta = freq; div = 255; /* Raster mode case: divisors are in range from 2 to 255 */ for (i = 2; i < 255; i++) { delta = abs(reference/i - freq); if (delta < min_delta) { div = i; min_delta = delta; } } return (div); } static int am335x_lcd_sysctl_backlight(SYSCTL_HANDLER_ARGS) { struct am335x_lcd_softc *sc = (struct am335x_lcd_softc*)arg1; int error; int backlight; backlight = sc->sc_backlight; error = sysctl_handle_int(oidp, 
&backlight, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (backlight < 0) backlight = 0; if (backlight > 100) backlight = 100; LCD_LOCK(sc); error = am335x_pwm_config_ecap(PWM_UNIT, PWM_PERIOD, backlight*PWM_PERIOD/100); if (error == 0) sc->sc_backlight = backlight; LCD_UNLOCK(sc); return (error); } static uint32_t am335x_mode_vrefresh(const struct videomode *mode) { uint32_t refresh; /* Calculate vertical refresh rate */ refresh = (mode->dot_clock * 1000 / mode->htotal); refresh = (refresh + mode->vtotal / 2) / mode->vtotal; if (mode->flags & VID_INTERLACE) refresh *= 2; if (mode->flags & VID_DBLSCAN) refresh /= 2; return refresh; } static int am335x_mode_is_valid(const struct videomode *mode) { uint32_t hbp, hfp, hsw; uint32_t vbp, vfp, vsw; if (mode->dot_clock > MAX_PIXEL_CLOCK) return (0); if (mode->hdisplay & 0xf) return (0); if (mode->vdisplay > 2048) return (0); /* Check ranges for timing parameters */ hbp = MODE_HBP(mode) - 1; hfp = MODE_HFP(mode) - 1; hsw = MODE_HSW(mode) - 1; vbp = MODE_VBP(mode); vfp = MODE_VFP(mode); vsw = MODE_VSW(mode) - 1; if (hbp > 0x3ff) return (0); if (hfp > 0x3ff) return (0); if (hsw > 0x3ff) return (0); if (vbp > 0xff) return (0); if (vfp > 0xff) return (0); if (vsw > 0x3f) return (0); if (mode->vdisplay*mode->hdisplay*am335x_mode_vrefresh(mode) > MAX_BANDWIDTH) return (0); return (1); } static void am335x_read_hdmi_property(device_t dev) { phandle_t node, xref; phandle_t endpoint; phandle_t hdmi_xref; struct am335x_lcd_softc *sc; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->sc_hdmi_framer = 0; /* * Old FreeBSD way of referencing to HDMI framer */ if (OF_getencprop(node, "hdmi", &hdmi_xref, sizeof(hdmi_xref)) != -1) { sc->sc_hdmi_framer = hdmi_xref; return; } /* * Use bindings described in Linux docs: * bindings/media/video-interfaces.txt * We assume that the only endpoint in LCDC node * is HDMI framer. 
*/ node = ofw_bus_find_child(node, "port"); /* No media bindings */ if (node == 0) return; for (endpoint = OF_child(node); endpoint != 0; endpoint = OF_peer(endpoint)) { if (OF_getencprop(endpoint, "remote-endpoint", &xref, sizeof(xref)) != -1) { /* port/port@0/endpoint@0 */ node = OF_node_from_xref(xref); /* port/port@0 */ node = OF_parent(node); /* port */ node = OF_parent(node); /* actual owner of port, in our case HDMI framer */ sc->sc_hdmi_framer = OF_xref_from_node(OF_parent(node)); if (sc->sc_hdmi_framer != 0) return; } } } static int am335x_read_property(device_t dev, phandle_t node, const char *name, uint32_t *val) { pcell_t cell; if ((OF_getencprop(node, name, &cell, sizeof(cell))) <= 0) { device_printf(dev, "missing '%s' attribute in LCD panel info\n", name); return (ENXIO); } *val = cell; return (0); } static int am335x_read_timing(device_t dev, phandle_t node, struct panel_info *panel) { int error; phandle_t timings_node, timing_node, native; timings_node = ofw_bus_find_child(node, "display-timings"); if (timings_node == 0) { device_printf(dev, "no \"display-timings\" node\n"); return (-1); } if (OF_searchencprop(timings_node, "native-mode", &native, sizeof(native)) == -1) { device_printf(dev, "no \"native-mode\" reference in \"timings\" node\n"); return (-1); } timing_node = OF_node_from_xref(native); error = 0; if ((error = am335x_read_property(dev, timing_node, "hactive", &panel->panel_width))) goto out; if ((error = am335x_read_property(dev, timing_node, "vactive", &panel->panel_height))) goto out; if ((error = am335x_read_property(dev, timing_node, "hfront-porch", &panel->panel_hfp))) goto out; if ((error = am335x_read_property(dev, timing_node, "hback-porch", &panel->panel_hbp))) goto out; if ((error = am335x_read_property(dev, timing_node, "hsync-len", &panel->panel_hsw))) goto out; if ((error = am335x_read_property(dev, timing_node, "vfront-porch", &panel->panel_vfp))) goto out; if ((error = am335x_read_property(dev, timing_node, "vback-porch", 
&panel->panel_vbp))) goto out; if ((error = am335x_read_property(dev, timing_node, "vsync-len", &panel->panel_vsw))) goto out; if ((error = am335x_read_property(dev, timing_node, "clock-frequency", &panel->panel_pxl_clk))) goto out; if ((error = am335x_read_property(dev, timing_node, "pixelclk-active", &panel->pixelclk_active))) goto out; if ((error = am335x_read_property(dev, timing_node, "hsync-active", &panel->hsync_active))) goto out; if ((error = am335x_read_property(dev, timing_node, "vsync-active", &panel->vsync_active))) goto out; out: return (error); } static int am335x_read_panel_info(device_t dev, phandle_t node, struct panel_info *panel) { phandle_t panel_info_node; panel_info_node = ofw_bus_find_child(node, "panel-info"); if (panel_info_node == 0) return (-1); am335x_read_property(dev, panel_info_node, "ac-bias", &panel->ac_bias); am335x_read_property(dev, panel_info_node, "ac-bias-intrpt", &panel->ac_bias_intrpt); am335x_read_property(dev, panel_info_node, "dma-burst-sz", &panel->dma_burst_sz); am335x_read_property(dev, panel_info_node, "bpp", &panel->bpp); am335x_read_property(dev, panel_info_node, "fdd", &panel->fdd); am335x_read_property(dev, panel_info_node, "sync-edge", &panel->sync_edge); am335x_read_property(dev, panel_info_node, "sync-ctrl", &panel->sync_ctrl); return (0); } static void am335x_lcd_intr(void *arg) { struct am335x_lcd_softc *sc = arg; uint32_t reg; reg = LCD_READ4(sc, LCD_IRQSTATUS); LCD_WRITE4(sc, LCD_IRQSTATUS, reg); /* Read value back to make sure it reached the hardware */ reg = LCD_READ4(sc, LCD_IRQSTATUS); if (reg & IRQ_SYNC_LOST) { reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg &= ~RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); goto done; } if (reg & IRQ_PL) { reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg &= ~RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= 
RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); goto done; } if (reg & IRQ_EOF0) { LCD_WRITE4(sc, LCD_LCDDMA_FB0_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB0_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); reg &= ~IRQ_EOF0; } if (reg & IRQ_EOF1) { LCD_WRITE4(sc, LCD_LCDDMA_FB1_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB1_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); reg &= ~IRQ_EOF1; } if (reg & IRQ_FUF) { /* TODO: Handle FUF */ } if (reg & IRQ_ACB) { /* TODO: Handle ACB */ } done: LCD_WRITE4(sc, LCD_END_OF_INT_IND, 0); /* Read value back to make sure it reached the hardware */ reg = LCD_READ4(sc, LCD_END_OF_INT_IND); } static const struct videomode * am335x_lcd_pick_mode(struct edid_info *ei) { const struct videomode *videomode; const struct videomode *m; int n; /* Get standard VGA as default */ videomode = NULL; /* * Pick a mode. */ if (ei->edid_preferred_mode != NULL) { if (am335x_mode_is_valid(ei->edid_preferred_mode)) videomode = ei->edid_preferred_mode; } if (videomode == NULL) { m = ei->edid_modes; sort_modes(ei->edid_modes, &ei->edid_preferred_mode, ei->edid_nmodes); for (n = 0; n < ei->edid_nmodes; n++) if (am335x_mode_is_valid(&m[n])) { videomode = &m[n]; break; } } return videomode; } static int am335x_lcd_configure(struct am335x_lcd_softc *sc) { int div; uint32_t reg, timing0, timing1, timing2; uint32_t burst_log; size_t dma_size; uint32_t hbp, hfp, hsw; uint32_t vbp, vfp, vsw; uint32_t width, height; uint64_t ref_freq; int err; /* * try to adjust clock to get double of requested frequency * HDMI/DVI displays are very sensitive to error in frequncy value */ err = clk_set_freq(sc->sc_clk_dpll_disp_ck, sc->sc_panel.panel_pxl_clk*2, CLK_SET_ROUND_ANY); if (err != 0) { device_printf(sc->sc_dev, "can't set source frequency\n"); return (ENXIO); } err = clk_get_freq(sc->sc_clk_dpll_disp_ck, &ref_freq); if (err != 0) { device_printf(sc->sc_dev, "can't get reference frequency\n"); return (ENXIO); } /* Panel initialization */ dma_size = 
round_page(sc->sc_panel.panel_width*sc->sc_panel.panel_height*sc->sc_panel.bpp/8); /* * Now allocate framebuffer memory */ err = bus_dma_tag_create( bus_get_dma_tag(sc->sc_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_size, 1, /* maxsize, nsegments */ dma_size, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dma_tag); if (err) goto done; err = bus_dmamem_alloc(sc->sc_dma_tag, (void **)&sc->sc_fb_base, BUS_DMA_COHERENT, &sc->sc_dma_map); if (err) { device_printf(sc->sc_dev, "cannot allocate framebuffer\n"); goto done; } err = bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, sc->sc_fb_base, dma_size, am335x_fb_dmamap_cb, &sc->sc_fb_phys, BUS_DMA_NOWAIT); if (err) { device_printf(sc->sc_dev, "cannot load DMA map\n"); goto done; } /* Make sure it's blank */ memset(sc->sc_fb_base, 0x0, dma_size); /* Calculate actual FB Size */ sc->sc_fb_size = sc->sc_panel.panel_width*sc->sc_panel.panel_height*sc->sc_panel.bpp/8; /* Only raster mode is supported */ reg = CTRL_RASTER_MODE; div = am335x_lcd_calc_divisor(ref_freq, sc->sc_panel.panel_pxl_clk); reg |= (div << CTRL_DIV_SHIFT); LCD_WRITE4(sc, LCD_CTRL, reg); /* Set timing */ timing0 = timing1 = timing2 = 0; hbp = sc->sc_panel.panel_hbp - 1; hfp = sc->sc_panel.panel_hfp - 1; hsw = sc->sc_panel.panel_hsw - 1; vbp = sc->sc_panel.panel_vbp; vfp = sc->sc_panel.panel_vfp; vsw = sc->sc_panel.panel_vsw - 1; height = sc->sc_panel.panel_height - 1; width = sc->sc_panel.panel_width - 1; /* Horizontal back porch */ timing0 |= (hbp & 0xff) << RASTER_TIMING_0_HBP_SHIFT; timing2 |= ((hbp >> 8) & 3) << RASTER_TIMING_2_HBPHI_SHIFT; /* Horizontal front porch */ timing0 |= (hfp & 0xff) << RASTER_TIMING_0_HFP_SHIFT; timing2 |= ((hfp >> 8) & 3) << RASTER_TIMING_2_HFPHI_SHIFT; /* Horizontal sync width */ timing0 |= (hsw & 0x3f) << RASTER_TIMING_0_HSW_SHIFT; timing2 |= ((hsw >> 6) & 0xf) << RASTER_TIMING_2_HSWHI_SHIFT; /* 
Vertical back porch, front porch, sync width */ timing1 |= (vbp & 0xff) << RASTER_TIMING_1_VBP_SHIFT; timing1 |= (vfp & 0xff) << RASTER_TIMING_1_VFP_SHIFT; timing1 |= (vsw & 0x3f) << RASTER_TIMING_1_VSW_SHIFT; /* Pixels per line */ timing0 |= ((width >> 10) & 1) << RASTER_TIMING_0_PPLMSB_SHIFT; timing0 |= ((width >> 4) & 0x3f) << RASTER_TIMING_0_PPLLSB_SHIFT; /* Lines per panel */ timing1 |= (height & 0x3ff) << RASTER_TIMING_1_LPP_SHIFT; timing2 |= ((height >> 10 ) & 1) << RASTER_TIMING_2_LPP_B10_SHIFT; /* clock signal settings */ if (sc->sc_panel.sync_ctrl) timing2 |= RASTER_TIMING_2_PHSVS; if (sc->sc_panel.sync_edge) timing2 |= RASTER_TIMING_2_PHSVS_RISE; else timing2 |= RASTER_TIMING_2_PHSVS_FALL; if (sc->sc_panel.hsync_active == 0) timing2 |= RASTER_TIMING_2_IHS; if (sc->sc_panel.vsync_active == 0) timing2 |= RASTER_TIMING_2_IVS; if (sc->sc_panel.pixelclk_active == 0) timing2 |= RASTER_TIMING_2_IPC; /* AC bias */ timing2 |= (sc->sc_panel.ac_bias << RASTER_TIMING_2_ACB_SHIFT); timing2 |= (sc->sc_panel.ac_bias_intrpt << RASTER_TIMING_2_ACBI_SHIFT); LCD_WRITE4(sc, LCD_RASTER_TIMING_0, timing0); LCD_WRITE4(sc, LCD_RASTER_TIMING_1, timing1); LCD_WRITE4(sc, LCD_RASTER_TIMING_2, timing2); /* DMA settings */ reg = LCDDMA_CTRL_FB0_FB1; /* Find power of 2 for current burst size */ switch (sc->sc_panel.dma_burst_sz) { case 1: burst_log = 0; break; case 2: burst_log = 1; break; case 4: burst_log = 2; break; case 8: burst_log = 3; break; case 16: default: burst_log = 4; break; } reg |= (burst_log << LCDDMA_CTRL_BURST_SIZE_SHIFT); /* XXX: FIFO TH */ reg |= (0 << LCDDMA_CTRL_TH_FIFO_RDY_SHIFT); LCD_WRITE4(sc, LCD_LCDDMA_CTRL, reg); LCD_WRITE4(sc, LCD_LCDDMA_FB0_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB0_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); LCD_WRITE4(sc, LCD_LCDDMA_FB1_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB1_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); /* Enable LCD */ reg = RASTER_CTRL_LCDTFT; reg |= (sc->sc_panel.fdd << 
RASTER_CTRL_REQDLY_SHIFT); reg |= (PALETTE_DATA_ONLY << RASTER_CTRL_PALMODE_SHIFT); if (sc->sc_panel.bpp >= 24) reg |= RASTER_CTRL_TFT24; if (sc->sc_panel.bpp == 32) reg |= RASTER_CTRL_TFT24_UNPACKED; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); LCD_WRITE4(sc, LCD_CLKC_ENABLE, CLKC_ENABLE_DMA | CLKC_ENABLE_LDID | CLKC_ENABLE_CORE); LCD_WRITE4(sc, LCD_CLKC_RESET, CLKC_RESET_MAIN); DELAY(100); LCD_WRITE4(sc, LCD_CLKC_RESET, 0); reg = IRQ_EOF1 | IRQ_EOF0 | IRQ_FUF | IRQ_PL | IRQ_ACB | IRQ_SYNC_LOST | IRQ_RASTER_DONE | IRQ_FRAME_DONE; LCD_WRITE4(sc, LCD_IRQENABLE_SET, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); LCD_WRITE4(sc, LCD_SYSCONFIG, SYSCONFIG_STANDBY_SMART | SYSCONFIG_IDLE_SMART); sc->sc_fb_info.fb_name = device_get_nameunit(sc->sc_dev); sc->sc_fb_info.fb_vbase = (intptr_t)sc->sc_fb_base; sc->sc_fb_info.fb_pbase = sc->sc_fb_phys; sc->sc_fb_info.fb_size = sc->sc_fb_size; sc->sc_fb_info.fb_bpp = sc->sc_fb_info.fb_depth = sc->sc_panel.bpp; sc->sc_fb_info.fb_stride = sc->sc_panel.panel_width*sc->sc_panel.bpp / 8; sc->sc_fb_info.fb_width = sc->sc_panel.panel_width; sc->sc_fb_info.fb_height = sc->sc_panel.panel_height; #ifdef DEV_SC err = (sc_attach_unit(device_get_unit(sc->sc_dev), device_get_flags(sc->sc_dev) | SC_AUTODETECT_KBD)); if (err) { device_printf(sc->sc_dev, "failed to attach syscons\n"); goto fail; } am335x_lcd_syscons_setup((vm_offset_t)sc->sc_fb_base, sc->sc_fb_phys, &panel); #else /* VT */ device_t fbd = device_add_child(sc->sc_dev, "fbd", device_get_unit(sc->sc_dev)); if (fbd != NULL) { if (device_probe_and_attach(fbd) != 0) device_printf(sc->sc_dev, "failed to attach fbd device\n"); } else device_printf(sc->sc_dev, "failed to add fbd child\n"); #endif done: return (err); } static void am335x_lcd_hdmi_event(void *arg, device_t hdmi, int event) { struct am335x_lcd_softc *sc; const struct videomode *videomode; struct videomode hdmi_mode; device_t hdmi_dev; uint8_t *edid; uint32_t edid_len; struct 
edid_info ei; sc = arg; /* Nothing to work with */ if (!sc->sc_hdmi_framer) { device_printf(sc->sc_dev, "HDMI event without HDMI framer set\n"); return; } hdmi_dev = OF_device_from_xref(sc->sc_hdmi_framer); if (!hdmi_dev) { device_printf(sc->sc_dev, "no actual device for \"hdmi\" property\n"); return; } edid = NULL; edid_len = 0; if (CRTC_GET_EDID(hdmi_dev, &edid, &edid_len) != 0) { device_printf(sc->sc_dev, "failed to get EDID info from HDMI framer\n"); return; } videomode = NULL; if (edid_parse(edid, &ei) == 0) { edid_print(&ei); videomode = am335x_lcd_pick_mode(&ei); } else device_printf(sc->sc_dev, "failed to parse EDID\n"); /* Use standard VGA as fallback */ if (videomode == NULL) videomode = pick_mode_by_ref(640, 480, 60); if (videomode == NULL) { device_printf(sc->sc_dev, "failed to find usable videomode"); return; } device_printf(sc->sc_dev, "detected videomode: %dx%d @ %dKHz\n", videomode->hdisplay, videomode->vdisplay, am335x_mode_vrefresh(videomode)); sc->sc_panel.panel_width = videomode->hdisplay; sc->sc_panel.panel_height = videomode->vdisplay; sc->sc_panel.panel_hfp = videomode->hsync_start - videomode->hdisplay; sc->sc_panel.panel_hbp = videomode->htotal - videomode->hsync_end; sc->sc_panel.panel_hsw = videomode->hsync_end - videomode->hsync_start; sc->sc_panel.panel_vfp = videomode->vsync_start - videomode->vdisplay; sc->sc_panel.panel_vbp = videomode->vtotal - videomode->vsync_end; sc->sc_panel.panel_vsw = videomode->vsync_end - videomode->vsync_start; sc->sc_panel.pixelclk_active = 1; /* logic for HSYNC should be reversed */ if (videomode->flags & VID_NHSYNC) sc->sc_panel.hsync_active = 1; else sc->sc_panel.hsync_active = 0; if (videomode->flags & VID_NVSYNC) sc->sc_panel.vsync_active = 0; else sc->sc_panel.vsync_active = 1; sc->sc_panel.panel_pxl_clk = videomode->dot_clock * 1000; am335x_lcd_configure(sc); memcpy(&hdmi_mode, videomode, sizeof(hdmi_mode)); hdmi_mode.hskew = videomode->hsync_end - videomode->hsync_start; hdmi_mode.flags |= 
VID_HSKEW; CRTC_SET_VIDEOMODE(hdmi_dev, &hdmi_mode); } static int am335x_lcd_probe(device_t dev) { #ifdef DEV_SC int err; #endif if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,am33xx-tilcdc")) return (ENXIO); device_set_desc(dev, "AM335x LCD controller"); #ifdef DEV_SC err = sc_probe_unit(device_get_unit(dev), device_get_flags(dev) | SC_AUTODETECT_KBD); if (err != 0) return (err); #endif return (BUS_PROBE_DEFAULT); } static int am335x_lcd_attach(device_t dev) { struct am335x_lcd_softc *sc; int err; int rid; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; phandle_t root, panel_node; err = 0; sc = device_get_softc(dev); sc->sc_dev = dev; am335x_read_hdmi_property(dev); root = OF_finddevice("/"); if (root == -1) { device_printf(dev, "failed to get FDT root node\n"); return (ENXIO); } /* Fixme: Cant find any reference in DTS for dpll_disp_ck@498 for now. */ err = clk_get_by_name(dev, "dpll_disp_ck@498", &sc->sc_clk_dpll_disp_ck); if (err != 0) { device_printf(dev, "Cant get dpll_disp_ck@49\n"); return (ENXIO); } sc->sc_panel.ac_bias = 255; sc->sc_panel.ac_bias_intrpt = 0; sc->sc_panel.dma_burst_sz = 16; sc->sc_panel.bpp = 16; sc->sc_panel.fdd = 128; sc->sc_panel.sync_edge = 0; sc->sc_panel.sync_ctrl = 1; panel_node = fdt_find_compatible(root, "ti,tilcdc,panel", 1); if (panel_node != 0) { device_printf(dev, "using static panel info\n"); if (am335x_read_panel_info(dev, panel_node, &sc->sc_panel)) { device_printf(dev, "failed to read panel info\n"); return (ENXIO); } if (am335x_read_timing(dev, panel_node, &sc->sc_panel)) { device_printf(dev, "failed to read timings\n"); return (ENXIO); } } err = ti_sysc_clock_enable(device_get_parent(dev)); if (err != 0) { device_printf(dev, "Failed to enable sysc clkctrl, err %d\n", err); return (ENXIO); } rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } rid = 0; 
sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, am335x_lcd_intr, sc, &sc->sc_intr_hl) != 0) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->sc_mem_res); device_printf(dev, "Unable to setup the irq handler.\n"); return (ENXIO); } LCD_LOCK_INIT(sc); /* Init backlight interface */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "backlight", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, am335x_lcd_sysctl_backlight, "I", "LCD backlight"); sc->sc_backlight = 0; /* Check if eCAS interface is available at this point */ if (am335x_pwm_config_ecap(PWM_UNIT, PWM_PERIOD, PWM_PERIOD) == 0) sc->sc_backlight = 100; if (panel_node != 0) am335x_lcd_configure(sc); else sc->sc_hdmi_evh = EVENTHANDLER_REGISTER(hdmi_event, am335x_lcd_hdmi_event, sc, EVENTHANDLER_PRI_ANY); return (0); } static int am335x_lcd_detach(device_t dev) { /* Do not let unload driver */ return (EBUSY); } static struct fb_info * am335x_lcd_fb_getinfo(device_t dev) { struct am335x_lcd_softc *sc; sc = device_get_softc(dev); return (&sc->sc_fb_info); } static device_method_t am335x_lcd_methods[] = { DEVMETHOD(device_probe, am335x_lcd_probe), DEVMETHOD(device_attach, am335x_lcd_attach), DEVMETHOD(device_detach, am335x_lcd_detach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, am335x_lcd_fb_getinfo), DEVMETHOD_END }; static driver_t am335x_lcd_driver = { "fb", am335x_lcd_methods, sizeof(struct am335x_lcd_softc), }; DRIVER_MODULE(am335x_lcd, simplebus, am335x_lcd_driver, 0, 0); MODULE_VERSION(am335x_lcd, 1); MODULE_DEPEND(am335x_lcd, simplebus, 1, 1, 1); MODULE_DEPEND(am335x_lcd, 
ti_sysc, 1, 1, 1); diff --git a/sys/arm/ti/am335x/am335x_musb.c b/sys/arm/ti/am335x/am335x_musb.c index 28058cc96899..b24f12990640 100644 --- a/sys/arm/ti/am335x/am335x_musb.c +++ b/sys/arm/ti/am335x/am335x_musb.c @@ -1,457 +1,457 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usbssdebug #include #include #include #include #include #include #include -#include +#include #include #include "syscon_if.h" #define USBCTRL_REV 0x00 #define USBCTRL_CTRL 0x14 #define USBCTRL_STAT 0x18 #define USBCTRL_IRQ_STAT0 0x30 #define IRQ_STAT0_RXSHIFT 16 #define IRQ_STAT0_TXSHIFT 0 #define USBCTRL_IRQ_STAT1 0x34 #define IRQ_STAT1_DRVVBUS (1 << 8) #define USBCTRL_INTEN_SET0 0x38 #define USBCTRL_INTEN_SET1 0x3C #define USBCTRL_INTEN_USB_ALL 0x1ff #define USBCTRL_INTEN_USB_SOF (1 << 3) #define USBCTRL_INTEN_CLR0 0x40 #define USBCTRL_INTEN_CLR1 0x44 #define USBCTRL_UTMI 0xE0 #define USBCTRL_UTMI_FSDATAEXT (1 << 1) #define USBCTRL_MODE 0xE8 #define USBCTRL_MODE_IDDIG (1 << 8) #define USBCTRL_MODE_IDDIGMUX (1 << 7) /* USBSS resource + 2 MUSB ports */ #define RES_USBCORE 0 #define RES_USBCTRL 1 #define USB_WRITE4(sc, idx, reg, val) do { \ bus_write_4((sc)->sc_mem_res[idx], (reg), (val)); \ } while (0) #define USB_READ4(sc, idx, reg) bus_read_4((sc)->sc_mem_res[idx], (reg)) #define USBCTRL_WRITE4(sc, reg, val) \ USB_WRITE4((sc), RES_USBCTRL, (reg), (val)) #define USBCTRL_READ4(sc, reg) \ USB_READ4((sc), RES_USBCTRL, (reg)) static struct resource_spec am335x_musbotg_mem_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { -1, 0, 0 } }; #ifdef USB_DEBUG static int usbssdebug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, am335x_usbss, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "AM335x USBSS"); SYSCTL_INT(_hw_usb_am335x_usbss, OID_AUTO, debug, CTLFLAG_RW, &usbssdebug, 0, "Debug level"); #endif static device_probe_t musbotg_probe; static device_attach_t musbotg_attach; static device_detach_t musbotg_detach; struct musbotg_super_softc { struct musbotg_softc sc_otg; struct resource 
*sc_mem_res[2]; int sc_irq_rid; struct syscon *syscon; }; static void musbotg_vbus_poll(struct musbotg_super_softc *sc) { uint32_t stat; if (sc->sc_otg.sc_mode == MUSB2_DEVICE_MODE) musbotg_vbus_interrupt(&sc->sc_otg, 1); else { stat = USBCTRL_READ4(sc, USBCTRL_STAT); musbotg_vbus_interrupt(&sc->sc_otg, stat & 1); } } /* * Arg to musbotg_clocks_on and musbot_clocks_off is * a uint32_t * pointing to the SCM register offset. */ static uint32_t USB_CTRL[] = {SCM_USB_CTRL0, SCM_USB_CTRL1}; static void musbotg_clocks_on(void *arg) { struct musbotg_softc *sc; struct musbotg_super_softc *ssc; uint32_t reg; sc = arg; ssc = sc->sc_platform_data; reg = SYSCON_READ_4(ssc->syscon, USB_CTRL[sc->sc_id]); reg &= ~3; /* Enable power */ reg |= 1 << 19; /* VBUS detect enable */ reg |= 1 << 20; /* Session end enable */ SYSCON_WRITE_4(ssc->syscon, USB_CTRL[sc->sc_id], reg); } static void musbotg_clocks_off(void *arg) { struct musbotg_softc *sc; struct musbotg_super_softc *ssc; uint32_t reg; sc = arg; ssc = sc->sc_platform_data; /* Disable power to PHY */ reg = SYSCON_READ_4(ssc->syscon, USB_CTRL[sc->sc_id]); SYSCON_WRITE_4(ssc->syscon, USB_CTRL[sc->sc_id], reg | 3); } static void musbotg_ep_int_set(struct musbotg_softc *sc, int ep, int on) { struct musbotg_super_softc *ssc = sc->sc_platform_data; uint32_t epmask; epmask = ((1 << ep) << IRQ_STAT0_RXSHIFT); epmask |= ((1 << ep) << IRQ_STAT0_TXSHIFT); if (on) USBCTRL_WRITE4(ssc, USBCTRL_INTEN_SET0, epmask); else USBCTRL_WRITE4(ssc, USBCTRL_INTEN_CLR0, epmask); } static void musbotg_wrapper_interrupt(void *arg) { struct musbotg_softc *sc = arg; struct musbotg_super_softc *ssc = sc->sc_platform_data; uint32_t stat, stat0, stat1; stat = USBCTRL_READ4(ssc, USBCTRL_STAT); stat0 = USBCTRL_READ4(ssc, USBCTRL_IRQ_STAT0); stat1 = USBCTRL_READ4(ssc, USBCTRL_IRQ_STAT1); if (stat0) USBCTRL_WRITE4(ssc, USBCTRL_IRQ_STAT0, stat0); if (stat1) USBCTRL_WRITE4(ssc, USBCTRL_IRQ_STAT1, stat1); DPRINTFN(4, "port%d: stat0=%08x stat1=%08x, stat=%08x\n", 
sc->sc_id, stat0, stat1, stat); if (stat1 & IRQ_STAT1_DRVVBUS) musbotg_vbus_interrupt(sc, stat & 1); musbotg_interrupt(arg, ((stat0 >> 16) & 0xffff), stat0 & 0xffff, stat1 & 0xff); } static int musbotg_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,musb-am33xx")) return (ENXIO); device_set_desc(dev, "TI AM33xx integrated USB OTG controller"); return (BUS_PROBE_DEFAULT); } static int musbotg_attach(device_t dev) { struct musbotg_super_softc *sc = device_get_softc(dev); char mode[16]; int err; uint32_t reg; phandle_t opp_table; clk_t clk_usbotg_fck; sc->sc_otg.sc_id = device_get_unit(dev); /* FIXME: The devicetree needs to be updated to get a handle to the gate * usbotg_fck@47c. see TRM 8.1.12.2 CM_WKUP CM_CLKDCOLDO_DPLL_PER. */ err = clk_get_by_name(dev, "usbotg_fck@47c", &clk_usbotg_fck); if (err) { device_printf(dev, "Can not find usbotg_fck@47c\n"); return (ENXIO); } err = clk_enable(clk_usbotg_fck); if (err) { device_printf(dev, "Can not enable usbotg_fck@47c\n"); return (ENXIO); } /* FIXME: For now; Go and kidnap syscon from opp-table */ opp_table = OF_finddevice("/opp-table"); if (opp_table == -1) { device_printf(dev, "Cant find /opp-table\n"); return (ENXIO); } if (!OF_hasprop(opp_table, "syscon")) { device_printf(dev, "/opp-table missing syscon property\n"); return (ENXIO); } err = syscon_get_by_ofw_property(dev, opp_table, "syscon", &sc->syscon); if (err) { device_printf(dev, "Failed to get syscon\n"); return (ENXIO); } /* Request the memory resources */ err = bus_alloc_resources(dev, am335x_musbotg_mem_spec, sc->sc_mem_res); if (err) { device_printf(dev, "Error: could not allocate mem resources\n"); return (ENXIO); } /* Request the IRQ resources */ sc->sc_otg.sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE); if (sc->sc_otg.sc_irq_res == NULL) { device_printf(dev, "Error: could not allocate irq resources\n"); return (ENXIO); } /* setup MUSB OTG USB controller 
interface softc */ sc->sc_otg.sc_clocks_on = &musbotg_clocks_on; sc->sc_otg.sc_clocks_off = &musbotg_clocks_off; sc->sc_otg.sc_clocks_arg = &sc->sc_otg; sc->sc_otg.sc_ep_int_set = musbotg_ep_int_set; /* initialise some bus fields */ sc->sc_otg.sc_bus.parent = dev; sc->sc_otg.sc_bus.devices = sc->sc_otg.sc_devices; sc->sc_otg.sc_bus.devices_max = MUSB2_MAX_DEVICES; sc->sc_otg.sc_bus.dma_bits = 32; /* get all DMA memory */ if (usb_bus_mem_alloc_all(&sc->sc_otg.sc_bus, USB_GET_DMA_TAG(dev), NULL)) { device_printf(dev, "Failed allocate bus mem for musb\n"); return (ENOMEM); } sc->sc_otg.sc_io_res = sc->sc_mem_res[RES_USBCORE]; sc->sc_otg.sc_io_tag = rman_get_bustag(sc->sc_otg.sc_io_res); sc->sc_otg.sc_io_hdl = rman_get_bushandle(sc->sc_otg.sc_io_res); sc->sc_otg.sc_io_size = rman_get_size(sc->sc_otg.sc_io_res); sc->sc_otg.sc_bus.bdev = device_add_child(dev, "usbus", -1); if (!(sc->sc_otg.sc_bus.bdev)) { device_printf(dev, "No busdev for musb\n"); goto error; } device_set_ivars(sc->sc_otg.sc_bus.bdev, &sc->sc_otg.sc_bus); err = bus_setup_intr(dev, sc->sc_otg.sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)musbotg_wrapper_interrupt, &sc->sc_otg, &sc->sc_otg.sc_intr_hdl); if (err) { sc->sc_otg.sc_intr_hdl = NULL; device_printf(dev, "Failed to setup interrupt for musb\n"); goto error; } sc->sc_otg.sc_platform_data = sc; if (OF_getprop(ofw_bus_get_node(dev), "dr_mode", mode, sizeof(mode)) > 0) { if (strcasecmp(mode, "host") == 0) sc->sc_otg.sc_mode = MUSB2_HOST_MODE; else sc->sc_otg.sc_mode = MUSB2_DEVICE_MODE; } else { /* Beaglebone defaults: USB0 device, USB1 HOST. 
*/ if (sc->sc_otg.sc_id == 0) sc->sc_otg.sc_mode = MUSB2_DEVICE_MODE; else sc->sc_otg.sc_mode = MUSB2_HOST_MODE; } /* * software-controlled function */ if (sc->sc_otg.sc_mode == MUSB2_HOST_MODE) { reg = USBCTRL_READ4(sc, USBCTRL_MODE); reg |= USBCTRL_MODE_IDDIGMUX; reg &= ~USBCTRL_MODE_IDDIG; USBCTRL_WRITE4(sc, USBCTRL_MODE, reg); USBCTRL_WRITE4(sc, USBCTRL_UTMI, USBCTRL_UTMI_FSDATAEXT); } else { reg = USBCTRL_READ4(sc, USBCTRL_MODE); reg |= USBCTRL_MODE_IDDIGMUX; reg |= USBCTRL_MODE_IDDIG; USBCTRL_WRITE4(sc, USBCTRL_MODE, reg); } reg = USBCTRL_INTEN_USB_ALL & ~USBCTRL_INTEN_USB_SOF; USBCTRL_WRITE4(sc, USBCTRL_INTEN_SET1, reg); USBCTRL_WRITE4(sc, USBCTRL_INTEN_CLR0, 0xffffffff); err = musbotg_init(&sc->sc_otg); if (!err) err = device_probe_and_attach(sc->sc_otg.sc_bus.bdev); if (err) goto error; /* poll VBUS one time */ musbotg_vbus_poll(sc); return (0); error: musbotg_detach(dev); return (ENXIO); } static int musbotg_detach(device_t dev) { struct musbotg_super_softc *sc = device_get_softc(dev); /* during module unload there are lots of children leftover */ device_delete_children(dev); if (sc->sc_otg.sc_irq_res && sc->sc_otg.sc_intr_hdl) { /* * only call musbotg_uninit() after musbotg_init() */ musbotg_uninit(&sc->sc_otg); bus_teardown_intr(dev, sc->sc_otg.sc_irq_res, sc->sc_otg.sc_intr_hdl); sc->sc_otg.sc_intr_hdl = NULL; } usb_bus_mem_free_all(&sc->sc_otg.sc_bus, NULL); /* Free resources if any */ if (sc->sc_mem_res[0]) bus_release_resources(dev, am335x_musbotg_mem_spec, sc->sc_mem_res); if (sc->sc_otg.sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_otg.sc_irq_res); return (0); } static device_method_t musbotg_methods[] = { /* Device interface */ DEVMETHOD(device_probe, musbotg_probe), DEVMETHOD(device_attach, musbotg_attach), DEVMETHOD(device_detach, musbotg_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static 
driver_t musbotg_driver = { .name = "musbotg", .methods = musbotg_methods, .size = sizeof(struct musbotg_super_softc), }; DRIVER_MODULE(musbotg, ti_sysc, musbotg_driver, 0, 0); MODULE_DEPEND(musbotg, ti_sysc, 1, 1, 1); MODULE_DEPEND(musbotg, ti_am3359_cppi41, 1, 1, 1); MODULE_DEPEND(usbss, usb, 1, 1, 1); diff --git a/sys/arm/ti/clk/clock_common.c b/sys/arm/ti/clk/clock_common.c index 78f60f609146..f8c9745dfc89 100644 --- a/sys/arm/ti/clk/clock_common.c +++ b/sys/arm/ti/clk/clock_common.c @@ -1,147 +1,147 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_common.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) #endif void read_clock_cells(device_t dev, struct clock_cell_info *clk) { ssize_t numbytes_clocks; phandle_t node, parent, *cells; int index, ncells, rv; node = ofw_bus_get_node(dev); /* Get names of parent clocks */ numbytes_clocks = OF_getproplen(node, "clocks"); clk->num_clock_cells = numbytes_clocks / sizeof(cell_t); /* Allocate space and get clock cells content */ /* clock_cells / clock_cells_ncells will be freed in * find_parent_clock_names() */ clk->clock_cells = malloc(numbytes_clocks, M_DEVBUF, M_WAITOK|M_ZERO); clk->clock_cells_ncells = malloc(clk->num_clock_cells*sizeof(uint8_t), M_DEVBUF, M_WAITOK|M_ZERO); OF_getencprop(node, "clocks", clk->clock_cells, numbytes_clocks); /* Count number of clocks */ clk->num_real_clocks = 0; for (index = 0; index < clk->num_clock_cells; index++) { rv = ofw_bus_parse_xref_list_alloc(node, "clocks", "#clock-cells", clk->num_real_clocks, &parent, &ncells, &cells); if (rv != 0) continue; if (cells != NULL) OF_prop_free(cells); clk->clock_cells_ncells[index] = ncells; index += ncells; clk->num_real_clocks++; } } int find_parent_clock_names(device_t dev, struct clock_cell_info *clk, struct clknode_init_def *def) { int index, clock_index, err; bool found_all = true; clk_t parent; /* Figure out names */ for (index = 0, clock_index = 0; index < clk->num_clock_cells; index++) { /* Get name of parent clock */ err = clk_get_by_ofw_index(dev, 0, clock_index, &parent); if (err != 0) { clock_index++; found_all = false; DPRINTF(dev, "Failed to find clock_cells[%d]=0x%x\n", index, clk->clock_cells[index]); index += clk->clock_cells_ncells[index]; continue; } def->parent_names[clock_index] = clk_get_name(parent); clk_release(parent); DPRINTF(dev, "Found parent clock[%d/%d]: %s\n", 
clock_index, clk->num_real_clocks, def->parent_names[clock_index]); clock_index++; index += clk->clock_cells_ncells[index]; } if (!found_all) { return 1; } free(clk->clock_cells, M_DEVBUF); free(clk->clock_cells_ncells, M_DEVBUF); return 0; } void create_clkdef(device_t dev, struct clock_cell_info *clk, struct clknode_init_def *def) { def->id = 1; clk_parse_ofw_clk_name(dev, ofw_bus_get_node(dev), &def->name); DPRINTF(dev, "node name: %s\n", def->name); def->parent_cnt = clk->num_real_clocks; def->parent_names = malloc(clk->num_real_clocks*sizeof(char *), M_OFWPROP, M_WAITOK); } void free_clkdef(struct clknode_init_def *def) { OF_prop_free(__DECONST(char *, def->name)); OF_prop_free(def->parent_names); } diff --git a/sys/arm/ti/clk/ti_clk_clkctrl.c b/sys/arm/ti/clk/ti_clk_clkctrl.c index 1fc15e679051..037f02a64f0f 100644 --- a/sys/arm/ti/clk/ti_clk_clkctrl.c +++ b/sys/arm/ti/clk/ti_clk_clkctrl.c @@ -1,214 +1,214 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) #endif /* * clknode for clkctrl, implements gate and mux (for gpioc) */ #define GPIO_X_GDBCLK_MASK 0x00040000 #define IDLEST_MASK 0x00030000 #define MODULEMODE_MASK 0x00000003 #define GPIOX_GDBCLK_ENABLE 0x00040000 #define GPIOX_GDBCLK_DISABLE 0x00000000 #define IDLEST_FUNC 0x00000000 #define IDLEST_TRANS 0x00010000 #define IDLEST_IDLE 0x00020000 #define IDLEST_DISABLE 0x00030000 #define MODULEMODE_DISABLE 0x0 #define MODULEMODE_ENABLE 0x2 struct ti_clkctrl_clknode_sc { device_t dev; bool gdbclk; /* omap4-cm range.host + ti,clkctrl reg[0] */ uint32_t register_offset; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int ti_clkctrl_init(struct clknode *clk, device_t dev) { struct ti_clkctrl_clknode_sc *sc; sc = clknode_get_softc(clk); sc->dev = dev; clknode_init_parent_idx(clk, 0); return (0); } static int ti_clkctrl_set_gdbclk_gate(struct clknode *clk, bool enable) { struct ti_clkctrl_clknode_sc *sc; uint32_t val, gpio_x_gdbclk; uint32_t timeout = 100; sc = clknode_get_softc(clk); READ4(clk, sc->register_offset, 
&val); DPRINTF(sc->dev, "val(%x) & (%x | %x = %x)\n", val, GPIO_X_GDBCLK_MASK, MODULEMODE_MASK, GPIO_X_GDBCLK_MASK | MODULEMODE_MASK); if (enable) { val = val & MODULEMODE_MASK; val |= GPIOX_GDBCLK_ENABLE; } else { val = val & MODULEMODE_MASK; val |= GPIOX_GDBCLK_DISABLE; } DPRINTF(sc->dev, "val %x\n", val); WRITE4(clk, sc->register_offset, val); /* Wait */ while (timeout) { READ4(clk, sc->register_offset, &val); gpio_x_gdbclk = val & GPIO_X_GDBCLK_MASK; if (enable && (gpio_x_gdbclk == GPIOX_GDBCLK_ENABLE)) break; else if (!enable && (gpio_x_gdbclk == GPIOX_GDBCLK_DISABLE)) break; DELAY(10); timeout--; } if (timeout == 0) { device_printf(sc->dev, "ti_clkctrl_set_gdbclk_gate: Timeout\n"); return (1); } return (0); } static int ti_clkctrl_set_gate(struct clknode *clk, bool enable) { struct ti_clkctrl_clknode_sc *sc; uint32_t val, idlest, module; uint32_t timeout=100; int err; sc = clknode_get_softc(clk); if (sc->gdbclk) { err = ti_clkctrl_set_gdbclk_gate(clk, enable); return (err); } READ4(clk, sc->register_offset, &val); if (enable) WRITE4(clk, sc->register_offset, MODULEMODE_ENABLE); else WRITE4(clk, sc->register_offset, MODULEMODE_DISABLE); while (timeout) { READ4(clk, sc->register_offset, &val); idlest = val & IDLEST_MASK; module = val & MODULEMODE_MASK; if (enable && (idlest == IDLEST_FUNC || idlest == IDLEST_TRANS) && module == MODULEMODE_ENABLE) break; else if (!enable && idlest == IDLEST_DISABLE && module == MODULEMODE_DISABLE) break; DELAY(10); timeout--; } if (timeout == 0) { device_printf(sc->dev, "ti_clkctrl_set_gate: Timeout\n"); return (1); } return (0); } static clknode_method_t ti_clkctrl_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, ti_clkctrl_init), CLKNODEMETHOD(clknode_set_gate, ti_clkctrl_set_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(ti_clkctrl_clknode, ti_clkctrl_clknode_class, ti_clkctrl_clknode_methods, sizeof(struct ti_clkctrl_clknode_sc), clknode_class); int ti_clknode_clkctrl_register(struct clkdom *clkdom, 
struct ti_clk_clkctrl_def *clkdef) { struct clknode *clk; struct ti_clkctrl_clknode_sc *sc; clk = clknode_create(clkdom, &ti_clkctrl_clknode_class, &clkdef->clkdef); if (clk == NULL) { return (1); } sc = clknode_get_softc(clk); sc->register_offset = clkdef->register_offset; sc->gdbclk = clkdef->gdbclk; if (clknode_register(clkdom, clk) == NULL) { return (2); } return (0); } diff --git a/sys/arm/ti/clk/ti_clk_clkctrl.h b/sys/arm/ti/clk/ti_clk_clkctrl.h index 2ecda3e0d1f6..6410c0488bb9 100644 --- a/sys/arm/ti/clk/ti_clk_clkctrl.h +++ b/sys/arm/ti/clk/ti_clk_clkctrl.h @@ -1,41 +1,41 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _TI_CLK_CLKCTRL_H_ #define _TI_CLK_CLKCTRL_H_ -#include +#include struct ti_clk_clkctrl_def { struct clknode_init_def clkdef; bool gdbclk; uint32_t register_offset; }; int ti_clknode_clkctrl_register(struct clkdom *clkdom, struct ti_clk_clkctrl_def *clkdef); #endif /* _TI_CLK_CLKCTRL_H_ */ diff --git a/sys/arm/ti/clk/ti_clk_dpll.c b/sys/arm/ti/clk/ti_clk_dpll.c index 0a23a2222c0e..c3d9d04b80c3 100644 --- a/sys/arm/ti/clk/ti_clk_dpll.c +++ b/sys/arm/ti/clk/ti_clk_dpll.c @@ -1,331 +1,331 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * based on sys/arm/allwinner/clkng/aw_clk_np.c */ #include #include #include -#include +#include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin * n / p * */ struct ti_dpll_clknode_sc { uint32_t ti_clkmode_offset; /* control */ uint8_t ti_clkmode_flags; uint32_t ti_idlest_offset; uint32_t ti_clksel_offset; /* mult-div1 */ struct ti_clk_factor n; /* ti_clksel_mult */ struct ti_clk_factor p; /* ti_clksel_div */ uint32_t ti_autoidle_offset; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int ti_dpll_clk_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } /* helper to keep aw_clk_np_find_best "intact" */ static inline uint32_t ti_clk_factor_get_max(struct ti_clk_factor *factor) { uint32_t max; if (factor->flags & TI_CLK_FACTOR_FIXED) max = factor->value; else { max = (1 << factor->width); } return (max); } static inline uint32_t ti_clk_factor_get_min(struct ti_clk_factor *factor) { uint32_t min; if (factor->flags & TI_CLK_FACTOR_FIXED) min = factor->value; else if (factor->flags & TI_CLK_FACTOR_ZERO_BASED) min = 0; else if (factor->flags & TI_CLK_FACTOR_MIN_VALUE) min = factor->min_value; else min = 1; return (min); } static uint64_t ti_dpll_clk_find_best(struct ti_dpll_clknode_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_n, uint32_t *factor_p) { uint64_t cur, best; uint32_t n, p, max_n, max_p, min_n, min_p; *factor_n = *factor_p = 0; max_n = ti_clk_factor_get_max(&sc->n); max_p = ti_clk_factor_get_max(&sc->p); min_n = ti_clk_factor_get_min(&sc->n); min_p = ti_clk_factor_get_min(&sc->p); for (p = min_p; p <= max_p; ) { for (n = min_n; n <= max_n; ) { cur = fparent * n / p; if (abs(*fout - cur) < 
abs(*fout - best)) { best = cur; *factor_n = n; *factor_p = p; } n++; } p++; } return (best); } static inline uint32_t ti_clk_get_factor(uint32_t val, struct ti_clk_factor *factor) { uint32_t factor_val; if (factor->flags & TI_CLK_FACTOR_FIXED) return (factor->value); factor_val = (val & factor->mask) >> factor->shift; if (!(factor->flags & TI_CLK_FACTOR_ZERO_BASED)) factor_val += 1; return (factor_val); } static inline uint32_t ti_clk_factor_get_value(struct ti_clk_factor *factor, uint32_t raw) { uint32_t val; if (factor->flags & TI_CLK_FACTOR_FIXED) return (factor->value); if (factor->flags & TI_CLK_FACTOR_ZERO_BASED) val = raw; else if (factor->flags & TI_CLK_FACTOR_MAX_VALUE && raw > factor->max_value) val = factor->max_value; else val = raw - 1; return (val); } static int ti_dpll_clk_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct ti_dpll_clknode_sc *sc; uint64_t cur, best; uint32_t val, n, p, best_n, best_p, timeout; sc = clknode_get_softc(clk); best = cur = 0; best = ti_dpll_clk_find_best(sc, fparent, fout, &best_n, &best_p); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && (flags == CLK_SET_ROUND_DOWN)) { *stop = 1; return (ERANGE); } if ((best > *fout) && (flags == CLK_SET_ROUND_UP)) { *stop = 1; return (ERANGE); } DEVICE_LOCK(clk); /* 1 switch PLL to bypass mode */ WRITE4(clk, sc->ti_clkmode_offset, DPLL_EN_MN_BYPASS_MODE); /* 2 Ensure PLL is in bypass */ timeout = 10000; do { DELAY(10); READ4(clk, sc->ti_idlest_offset, &val); } while (!(val & ST_MN_BYPASS_MASK) && timeout--); if (timeout == 0) { DEVICE_UNLOCK(clk); return (ERANGE); // FIXME: Better return value? 
} /* 3 Set DPLL_MULT & DPLL_DIV bits */ READ4(clk, sc->ti_clksel_offset, &val); n = ti_clk_factor_get_value(&sc->n, best_n); p = ti_clk_factor_get_value(&sc->p, best_p); val &= ~sc->n.mask; val &= ~sc->p.mask; val |= n << sc->n.shift; val |= p << sc->p.shift; WRITE4(clk, sc->ti_clksel_offset, val); /* 4. configure M2, M4, M5 and M6 */ /* * FIXME: According to documentation M2/M4/M5/M6 can be set "later" * See note in TRM 8.1.6.7.1 */ /* 5 Switch over to lock mode */ WRITE4(clk, sc->ti_clkmode_offset, DPLL_EN_LOCK_MODE); /* 6 Ensure PLL is locked */ timeout = 10000; do { DELAY(10); READ4(clk, sc->ti_idlest_offset, &val); } while (!(val & ST_DPLL_CLK_MASK) && timeout--); DEVICE_UNLOCK(clk); if (timeout == 0) { return (ERANGE); // FIXME: Better return value? } *fout = best; *stop = 1; return (0); } static int ti_dpll_clk_recalc(struct clknode *clk, uint64_t *freq) { struct ti_dpll_clknode_sc *sc; uint32_t val, n, p; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->ti_clksel_offset, &val); DEVICE_UNLOCK(clk); n = ti_clk_get_factor(val, &sc->n); p = ti_clk_get_factor(val, &sc->p); *freq = *freq * n / p; return (0); } static clknode_method_t ti_dpll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, ti_dpll_clk_init), CLKNODEMETHOD(clknode_recalc_freq, ti_dpll_clk_recalc), CLKNODEMETHOD(clknode_set_freq, ti_dpll_clk_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(ti_dpll_clknode, ti_dpll_clknode_class, ti_dpll_clknode_methods, sizeof(struct ti_dpll_clknode_sc), clknode_class); int ti_clknode_dpll_register(struct clkdom *clkdom, struct ti_clk_dpll_def *clkdef) { struct clknode *clk; struct ti_dpll_clknode_sc *sc; clk = clknode_create(clkdom, &ti_dpll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->ti_clkmode_offset = clkdef->ti_clkmode_offset; sc->ti_clkmode_flags = clkdef->ti_clkmode_flags; sc->ti_idlest_offset = clkdef->ti_idlest_offset; sc->ti_clksel_offset = clkdef->ti_clksel_offset; 
sc->n.shift = clkdef->ti_clksel_mult.shift; sc->n.mask = clkdef->ti_clksel_mult.mask; sc->n.width = clkdef->ti_clksel_mult.width; sc->n.value = clkdef->ti_clksel_mult.value; sc->n.min_value = clkdef->ti_clksel_mult.min_value; sc->n.max_value = clkdef->ti_clksel_mult.max_value; sc->n.flags = clkdef->ti_clksel_mult.flags; sc->p.shift = clkdef->ti_clksel_div.shift; sc->p.mask = clkdef->ti_clksel_div.mask; sc->p.width = clkdef->ti_clksel_div.width; sc->p.value = clkdef->ti_clksel_div.value; sc->p.min_value = clkdef->ti_clksel_div.min_value; sc->p.max_value = clkdef->ti_clksel_div.max_value; sc->p.flags = clkdef->ti_clksel_div.flags; sc->ti_autoidle_offset = clkdef->ti_autoidle_offset; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm/ti/clk/ti_clk_dpll.h b/sys/arm/ti/clk/ti_clk_dpll.h index 7bb51cd5efbe..731df77828f6 100644 --- a/sys/arm/ti/clk/ti_clk_dpll.h +++ b/sys/arm/ti/clk/ti_clk_dpll.h @@ -1,94 +1,94 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017 Emmanuel Vadot * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _TI_DPLL_CLOCK_H_ #define _TI_DPLL_CLOCK_H_ -#include +#include /* Registers are described in AM335x TRM chapter 8.1.12.2.* */ /* Register offsets */ #define CM_CLKSEL_DPLL_PERIPH 0x49C /* CM_IDLEST_DPLL_xxx */ #define ST_MN_BYPASS_MASK 0x0100 #define ST_MN_BYPASS_SHIFT 8 #define ST_DPLL_CLK_MASK 0x0001 /* CM_CLKMODE_DPLL_DPLL_EN feature flag */ #define LOW_POWER_STOP_MODE_FLAG 0x01 #define MN_BYPASS_MODE_FLAG 0x02 #define IDLE_BYPASS_LOW_POWER_MODE_FLAG 0x04 #define IDLE_BYPASS_FAST_RELOCK_MODE_FLAG 0x08 #define LOCK_MODE_FLAG 0x10 /* CM_CLKMODE_DPLL_xxx */ #define DPLL_EN_LOW_POWER_STOP_MODE 0x01 #define DPLL_EN_MN_BYPASS_MODE 0x04 #define DPLL_EN_IDLE_BYPASS_LOW_POWER_MODE 0x05 #define DPLL_EN_IDLE_BYPASS_FAST_RELOCK_MODE 0x06 #define DPLL_EN_LOCK_MODE 0x07 #define TI_CLK_FACTOR_ZERO_BASED 0x0002 #define TI_CLK_FACTOR_FIXED 0x0008 #define TI_CLK_FACTOR_MIN_VALUE 0x0020 #define TI_CLK_FACTOR_MAX_VALUE 0x0040 /* Based on aw_clk_factor sys/arm/allwinner/clkng/aw_clk.h */ struct ti_clk_factor { uint32_t shift; /* Shift bits for the factor */ uint32_t mask; /* Mask to get the factor */ uint32_t width; /* Number of bits for the factor */ uint32_t value; /* Fixed value */ uint32_t min_value; uint32_t max_value; uint32_t flags; /* Flags */ }; struct ti_clk_dpll_def { struct clknode_init_def clkdef; uint32_t ti_clkmode_offset; /* control */ uint8_t ti_clkmode_flags; uint32_t ti_idlest_offset; uint32_t ti_clksel_offset; /* mult-div1 */ struct ti_clk_factor 
ti_clksel_mult; struct ti_clk_factor ti_clksel_div; uint32_t ti_autoidle_offset; }; int ti_clknode_dpll_register(struct clkdom *clkdom, struct ti_clk_dpll_def *clkdef); #endif /* _TI_DPLL_CLOCK_H_ */ diff --git a/sys/arm/ti/clk/ti_divider_clock.c b/sys/arm/ti/clk/ti_divider_clock.c index c1b2e25a7651..ebe623762efc 100644 --- a/sys/arm/ti/clk/ti_divider_clock.c +++ b/sys/arm/ti/clk/ti_divider_clock.c @@ -1,257 +1,257 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_common.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) 
#endif /* * Devicetree description * Documentation/devicetree/bindings/clock/ti/divider.txt */ struct ti_divider_softc { device_t sc_dev; bool attach_done; struct clk_div_def div_def; struct clock_cell_info clock_cell; struct clkdom *clkdom; }; static int ti_divider_probe(device_t dev); static int ti_divider_attach(device_t dev); static int ti_divider_detach(device_t dev); #define TI_DIVIDER_CLOCK 2 #define TI_COMPOSITE_DIVIDER_CLOCK 1 #define TI_DIVIDER_END 0 static struct ofw_compat_data compat_data[] = { { "ti,divider-clock", TI_DIVIDER_CLOCK }, { "ti,composite-divider-clock", TI_COMPOSITE_DIVIDER_CLOCK }, { NULL, TI_DIVIDER_END } }; static int register_clk(struct ti_divider_softc *sc) { int err; sc->clkdom = clkdom_create(sc->sc_dev); if (sc->clkdom == NULL) { DPRINTF(sc->sc_dev, "Failed to create clkdom\n"); return (ENXIO); } err = clknode_div_register(sc->clkdom, &sc->div_def); if (err) { DPRINTF(sc->sc_dev, "clknode_div_register failed %x\n", err); return (ENXIO); } err = clkdom_finit(sc->clkdom); if (err) { DPRINTF(sc->sc_dev, "Clk domain finit fails %x.\n", err); return (ENXIO); } return (0); } static int ti_divider_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI Divider Clock"); return (BUS_PROBE_DEFAULT); } static int ti_divider_attach(device_t dev) { struct ti_divider_softc *sc; phandle_t node; int err; cell_t value; uint32_t ti_max_div; sc = device_get_softc(dev); sc->sc_dev = dev; node = ofw_bus_get_node(dev); /* Grab the content of reg properties */ OF_getencprop(node, "reg", &value, sizeof(value)); sc->div_def.offset = value; if (OF_hasprop(node, "ti,bit-shift")) { OF_getencprop(node, "ti,bit-shift", &value, sizeof(value)); sc->div_def.i_shift = value; } if (OF_hasprop(node, "ti,index-starts-at-one")) { sc->div_def.div_flags = CLK_DIV_ZERO_BASED; } if (OF_hasprop(node, "ti,index-power-of-two")) { /* FIXME: later */ 
device_printf(sc->sc_dev, "ti,index-power-of-two - Not implemented\n"); /* remember to update i_width a few lines below */ } if (OF_hasprop(node, "ti,max-div")) { OF_getencprop(node, "ti,max-div", &value, sizeof(value)); ti_max_div = value; } if (OF_hasprop(node, "clock-output-names")) device_printf(sc->sc_dev, "clock-output-names\n"); if (OF_hasprop(node, "ti,dividers")) device_printf(sc->sc_dev, "ti,dividers\n"); if (OF_hasprop(node, "ti,min-div")) device_printf(sc->sc_dev, "ti,min-div - Not implemented\n"); if (OF_hasprop(node, "ti,autoidle-shift")) device_printf(sc->sc_dev, "ti,autoidle-shift - Not implemented\n"); if (OF_hasprop(node, "ti,set-rate-parent")) device_printf(sc->sc_dev, "ti,set-rate-parent - Not implemented\n"); if (OF_hasprop(node, "ti,latch-bit")) device_printf(sc->sc_dev, "ti,latch-bit - Not implemented\n"); /* Figure out the width from ti_max_div */ if (sc->div_def.div_flags) sc->div_def.i_width = fls(ti_max_div-1); else sc->div_def.i_width = fls(ti_max_div); DPRINTF(sc->sc_dev, "div_def.i_width %x\n", sc->div_def.i_width); read_clock_cells(sc->sc_dev, &sc->clock_cell); create_clkdef(sc->sc_dev, &sc->clock_cell, &sc->div_def.clkdef); err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->div_def.clkdef); if (err) { /* free_clkdef will be called in ti_divider_new_pass */ DPRINTF(sc->sc_dev, "find_parent_clock_names failed\n"); return (bus_generic_attach(sc->sc_dev)); } err = register_clk(sc); if (err) { /* free_clkdef will be called in ti_divider_new_pass */ DPRINTF(sc->sc_dev, "register_clk failed\n"); return (bus_generic_attach(sc->sc_dev)); } sc->attach_done = true; free_clkdef(&sc->div_def.clkdef); return (bus_generic_attach(sc->sc_dev)); } static int ti_divider_detach(device_t dev) { return (EBUSY); } static void ti_divider_new_pass(device_t dev) { struct ti_divider_softc *sc; int err; sc = device_get_softc(dev); if (sc->attach_done) { return; } err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->div_def.clkdef); if 
(err) { /* free_clkdef will be called in a later call to ti_divider_new_pass */ DPRINTF(sc->sc_dev, "new_pass find_parent_clock_names failed\n"); return; } err = register_clk(sc); if (err) { /* free_clkdef will be called in a later call to ti_divider_new_pass */ DPRINTF(sc->sc_dev, "new_pass register_clk failed\n"); return; } sc->attach_done = true; free_clkdef(&sc->div_def.clkdef); } static device_method_t ti_divider_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_divider_probe), DEVMETHOD(device_attach, ti_divider_attach), DEVMETHOD(device_detach, ti_divider_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, ti_divider_new_pass), DEVMETHOD_END }; DEFINE_CLASS_0(ti_divider, ti_divider_driver, ti_divider_methods, sizeof(struct ti_divider_softc)); EARLY_DRIVER_MODULE(ti_divider, simplebus, ti_divider_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_divider, 1); diff --git a/sys/arm/ti/clk/ti_dpll_clock.c b/sys/arm/ti/clk/ti_dpll_clock.c index b47fef83bc3f..84b86008b6d6 100644 --- a/sys/arm/ti/clk/ti_dpll_clock.c +++ b/sys/arm/ti/clk/ti_dpll_clock.c @@ -1,368 +1,368 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "clock_common.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) #endif /* * Devicetree description * Documentation/devicetree/bindings/clock/ti/dpll.txt */ struct ti_dpll_softc { device_t dev; uint8_t dpll_type; bool attach_done; struct ti_clk_dpll_def dpll_def; struct clock_cell_info clock_cell; struct clkdom *clkdom; }; static int ti_dpll_probe(device_t dev); static int ti_dpll_attach(device_t dev); static int ti_dpll_detach(device_t dev); #define TI_OMAP3_DPLL_CLOCK 17 #define TI_OMAP3_DPLL_CORE_CLOCK 16 #define TI_OMAP3_DPLL_PER_CLOCK 15 #define TI_OMAP3_DPLL_PER_J_TYPE_CLOCK 14 #define TI_OMAP4_DPLL_CLOCK 13 #define TI_OMAP4_DPLL_X2_CLOCK 12 #define TI_OMAP4_DPLL_CORE_CLOCK 11 #define TI_OMAP4_DPLL_M4XEN_CLOCK 10 #define TI_OMAP4_DPLL_J_TYPE_CLOCK 9 #define TI_OMAP5_MPU_DPLL_CLOCK 8 #define TI_AM3_DPLL_NO_GATE_CLOCK 7 #define TI_AM3_DPLL_J_TYPE_CLOCK 6 #define TI_AM3_DPLL_NO_GATE_J_TYPE_CLOCK 5 #define TI_AM3_DPLL_CLOCK 4 #define TI_AM3_DPLL_CORE_CLOCK 3 #define TI_AM3_DPLL_X2_CLOCK 2 #define TI_OMAP2_DPLL_CORE_CLOCK 1 #define TI_DPLL_END 0 static struct ofw_compat_data compat_data[] = { { "ti,omap3-dpll-clock", TI_OMAP3_DPLL_CLOCK }, { "ti,omap3-dpll-core-clock", TI_OMAP3_DPLL_CORE_CLOCK }, { "ti,omap3-dpll-per-clock", TI_OMAP3_DPLL_PER_CLOCK }, { 
"ti,omap3-dpll-per-j-type-clock",TI_OMAP3_DPLL_PER_J_TYPE_CLOCK }, { "ti,omap4-dpll-clock", TI_OMAP4_DPLL_CLOCK }, { "ti,omap4-dpll-x2-clock", TI_OMAP4_DPLL_X2_CLOCK }, { "ti,omap4-dpll-core-clock", TI_OMAP4_DPLL_CORE_CLOCK }, { "ti,omap4-dpll-m4xen-clock", TI_OMAP4_DPLL_M4XEN_CLOCK }, { "ti,omap4-dpll-j-type-clock", TI_OMAP4_DPLL_J_TYPE_CLOCK }, { "ti,omap5-mpu-dpll-clock", TI_OMAP5_MPU_DPLL_CLOCK }, { "ti,am3-dpll-no-gate-clock", TI_AM3_DPLL_NO_GATE_CLOCK }, { "ti,am3-dpll-j-type-clock", TI_AM3_DPLL_J_TYPE_CLOCK }, { "ti,am3-dpll-no-gate-j-type-clock",TI_AM3_DPLL_NO_GATE_J_TYPE_CLOCK }, { "ti,am3-dpll-clock", TI_AM3_DPLL_CLOCK }, { "ti,am3-dpll-core-clock", TI_AM3_DPLL_CORE_CLOCK }, { "ti,am3-dpll-x2-clock", TI_AM3_DPLL_X2_CLOCK }, { "ti,omap2-dpll-core-clock", TI_OMAP2_DPLL_CORE_CLOCK }, { NULL, TI_DPLL_END } }; static int register_clk(struct ti_dpll_softc *sc) { int err; sc->clkdom = clkdom_create(sc->dev); if (sc->clkdom == NULL) { DPRINTF(sc->dev, "Failed to create clkdom\n"); return (ENXIO); } err = ti_clknode_dpll_register(sc->clkdom, &sc->dpll_def); if (err) { DPRINTF(sc->dev, "ti_clknode_dpll_register failed %x\n", err); return (ENXIO); } err = clkdom_finit(sc->clkdom); if (err) { DPRINTF(sc->dev, "Clk domain finit fails %x.\n", err); return (ENXIO); } return (0); } static int ti_dpll_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI DPLL Clock"); return (BUS_PROBE_DEFAULT); } static int parse_dpll_reg(struct ti_dpll_softc *sc) { ssize_t numbytes_regs; uint32_t num_regs; phandle_t node; cell_t reg_cells[4]; if (sc->dpll_type == TI_AM3_DPLL_X2_CLOCK || sc->dpll_type == TI_OMAP4_DPLL_X2_CLOCK) { sc->dpll_def.ti_clksel_mult.value = 2; sc->dpll_def.ti_clksel_mult.flags = TI_CLK_FACTOR_FIXED; sc->dpll_def.ti_clksel_div.value = 1; sc->dpll_def.ti_clksel_div.flags = TI_CLK_FACTOR_FIXED; return (0); } node = ofw_bus_get_node(sc->dev); 
numbytes_regs = OF_getproplen(node, "reg"); num_regs = numbytes_regs / sizeof(cell_t); /* Sanity check */ if (num_regs > 4) return (ENXIO); OF_getencprop(node, "reg", reg_cells, numbytes_regs); switch (sc->dpll_type) { case TI_AM3_DPLL_NO_GATE_CLOCK: case TI_AM3_DPLL_J_TYPE_CLOCK: case TI_AM3_DPLL_NO_GATE_J_TYPE_CLOCK: case TI_AM3_DPLL_CLOCK: case TI_AM3_DPLL_CORE_CLOCK: case TI_AM3_DPLL_X2_CLOCK: if (num_regs != 3) return (ENXIO); sc->dpll_def.ti_clkmode_offset = reg_cells[0]; sc->dpll_def.ti_idlest_offset = reg_cells[1]; sc->dpll_def.ti_clksel_offset = reg_cells[2]; break; case TI_OMAP2_DPLL_CORE_CLOCK: if (num_regs != 2) return (ENXIO); sc->dpll_def.ti_clkmode_offset = reg_cells[0]; sc->dpll_def.ti_clksel_offset = reg_cells[1]; break; default: sc->dpll_def.ti_clkmode_offset = reg_cells[0]; sc->dpll_def.ti_idlest_offset = reg_cells[1]; sc->dpll_def.ti_clksel_offset = reg_cells[2]; sc->dpll_def.ti_autoidle_offset = reg_cells[3]; break; } /* AM335x */ if (sc->dpll_def.ti_clksel_offset == CM_CLKSEL_DPLL_PERIPH) { sc->dpll_def.ti_clksel_mult.shift = 8; sc->dpll_def.ti_clksel_mult.mask = 0x000FFF00; sc->dpll_def.ti_clksel_mult.width = 12; sc->dpll_def.ti_clksel_mult.value = 0; sc->dpll_def.ti_clksel_mult.min_value = 2; sc->dpll_def.ti_clksel_mult.max_value = 4095; sc->dpll_def.ti_clksel_mult.flags = TI_CLK_FACTOR_ZERO_BASED | TI_CLK_FACTOR_MIN_VALUE | TI_CLK_FACTOR_MAX_VALUE; sc->dpll_def.ti_clksel_div.shift = 0; sc->dpll_def.ti_clksel_div.mask = 0x000000FF; sc->dpll_def.ti_clksel_div.width = 8; sc->dpll_def.ti_clksel_div.value = 0; sc->dpll_def.ti_clksel_div.min_value = 0; sc->dpll_def.ti_clksel_div.max_value = 255; sc->dpll_def.ti_clksel_div.flags = TI_CLK_FACTOR_MIN_VALUE | TI_CLK_FACTOR_MAX_VALUE; } else { sc->dpll_def.ti_clksel_mult.shift = 8; sc->dpll_def.ti_clksel_mult.mask = 0x0007FF00; sc->dpll_def.ti_clksel_mult.width = 11; sc->dpll_def.ti_clksel_mult.value = 0; sc->dpll_def.ti_clksel_mult.min_value = 2; sc->dpll_def.ti_clksel_mult.max_value = 2047; 
sc->dpll_def.ti_clksel_mult.flags = TI_CLK_FACTOR_ZERO_BASED | TI_CLK_FACTOR_MIN_VALUE | TI_CLK_FACTOR_MAX_VALUE; sc->dpll_def.ti_clksel_div.shift = 0; sc->dpll_def.ti_clksel_div.mask = 0x0000007F; sc->dpll_def.ti_clksel_div.width = 7; sc->dpll_def.ti_clksel_div.value = 0; sc->dpll_def.ti_clksel_div.min_value = 0; sc->dpll_def.ti_clksel_div.max_value = 127; sc->dpll_def.ti_clksel_div.flags = TI_CLK_FACTOR_MIN_VALUE | TI_CLK_FACTOR_MAX_VALUE; } DPRINTF(sc->dev, "clkmode %x idlest %x clksel %x autoidle %x\n", sc->dpll_def.ti_clkmode_offset, sc->dpll_def.ti_idlest_offset, sc->dpll_def.ti_clksel_offset, sc->dpll_def.ti_autoidle_offset); return (0); } static int ti_dpll_attach(device_t dev) { struct ti_dpll_softc *sc; phandle_t node; int err; sc = device_get_softc(dev); sc->dev = dev; sc->dpll_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; node = ofw_bus_get_node(dev); /* Grab the content of reg properties */ parse_dpll_reg(sc); /* default flags (OMAP4&AM335x) not present in the dts at moment */ sc->dpll_def.ti_clkmode_flags = MN_BYPASS_MODE_FLAG | LOCK_MODE_FLAG; if (OF_hasprop(node, "ti,low-power-stop")) { sc->dpll_def.ti_clkmode_flags |= LOW_POWER_STOP_MODE_FLAG; } if (OF_hasprop(node, "ti,low-power-bypass")) { sc->dpll_def.ti_clkmode_flags |= IDLE_BYPASS_LOW_POWER_MODE_FLAG; } if (OF_hasprop(node, "ti,lock")) { sc->dpll_def.ti_clkmode_flags |= LOCK_MODE_FLAG; } read_clock_cells(sc->dev, &sc->clock_cell); create_clkdef(sc->dev, &sc->clock_cell, &sc->dpll_def.clkdef); err = find_parent_clock_names(sc->dev, &sc->clock_cell, &sc->dpll_def.clkdef); if (err) { /* free_clkdef will be called in ti_dpll_new_pass */ DPRINTF(sc->dev, "find_parent_clock_names failed\n"); return (bus_generic_attach(sc->dev)); } err = register_clk(sc); if (err) { /* free_clkdef will be called in ti_dpll_new_pass */ DPRINTF(sc->dev, "register_clk failed\n"); return (bus_generic_attach(sc->dev)); } sc->attach_done = true; free_clkdef(&sc->dpll_def.clkdef); return 
(bus_generic_attach(sc->dev)); } static int ti_dpll_detach(device_t dev) { return (EBUSY); } static void ti_dpll_new_pass(device_t dev) { struct ti_dpll_softc *sc; int err; sc = device_get_softc(dev); if (sc->attach_done) { return; } err = find_parent_clock_names(sc->dev, &sc->clock_cell, &sc->dpll_def.clkdef); if (err) { /* free_clkdef will be called in a later call to ti_dpll_new_pass */ DPRINTF(sc->dev, "new_pass find_parent_clock_names failed\n"); return; } err = register_clk(sc); if (err) { /* free_clkdef will be called in a later call to ti_dpll_new_pass */ DPRINTF(sc->dev, "new_pass register_clk failed\n"); return; } sc->attach_done = true; free_clkdef(&sc->dpll_def.clkdef); } static device_method_t ti_dpll_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_dpll_probe), DEVMETHOD(device_attach, ti_dpll_attach), DEVMETHOD(device_detach, ti_dpll_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, ti_dpll_new_pass), DEVMETHOD_END }; DEFINE_CLASS_0(ti_dpll, ti_dpll_driver, ti_dpll_methods, sizeof(struct ti_dpll_softc)); EARLY_DRIVER_MODULE(ti_dpll, simplebus, ti_dpll_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_dpll, 1); diff --git a/sys/arm/ti/clk/ti_gate_clock.c b/sys/arm/ti/clk/ti_gate_clock.c index cf33f06574b7..fc54a196fb5e 100644 --- a/sys/arm/ti/clk/ti_gate_clock.c +++ b/sys/arm/ti/clk/ti_gate_clock.c @@ -1,259 +1,259 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_common.h" #define DEBUG_GATE 0 #if DEBUG_GATE #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) #endif /* * Devicetree description * Documentation/devicetree/bindings/clock/ti/gate.txt */ struct ti_gate_softc { device_t sc_dev; bool attach_done; uint8_t sc_type; struct clk_gate_def gate_def; struct clock_cell_info clock_cell; struct clkdom *clkdom; }; static int ti_gate_probe(device_t dev); static int ti_gate_attach(device_t dev); static int ti_gate_detach(device_t dev); #define TI_GATE_CLOCK 7 #define TI_WAIT_GATE_CLOCK 6 #define TI_DSS_GATE_CLOCK 5 #define TI_AM35XX_GATE_CLOCK 4 #define TI_CLKDM_GATE_CLOCK 3 #define TI_HSDIV_GATE_CLOCK 2 #define TI_COMPOSITE_NO_WAIT_GATE_CLOCK 1 #define TI_GATE_END 0 static struct ofw_compat_data compat_data[] = { { "ti,gate-clock", TI_GATE_CLOCK }, { "ti,wait-gate-clock", TI_WAIT_GATE_CLOCK }, { "ti,dss-gate-clock", TI_DSS_GATE_CLOCK }, { "ti,am35xx-gate-clock", TI_AM35XX_GATE_CLOCK }, { "ti,clkdm-gate-clock", TI_CLKDM_GATE_CLOCK }, { "ti,hsdiv-gate-cloc", TI_HSDIV_GATE_CLOCK }, { "ti,composite-no-wait-gate-clock", 
TI_COMPOSITE_NO_WAIT_GATE_CLOCK }, { NULL, TI_GATE_END } }; static int ti_gate_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI Gate Clock"); return (BUS_PROBE_DEFAULT); } static int register_clk(struct ti_gate_softc *sc) { int err; sc->clkdom = clkdom_create(sc->sc_dev); if (sc->clkdom == NULL) { DPRINTF(sc->sc_dev, "Failed to create clkdom\n"); return ENXIO; } err = clknode_gate_register(sc->clkdom, &sc->gate_def); if (err) { DPRINTF(sc->sc_dev, "clknode_gate_register failed %x\n", err); return ENXIO; } err = clkdom_finit(sc->clkdom); if (err) { DPRINTF(sc->sc_dev, "Clk domain finit fails %x.\n", err); return ENXIO; } return (0); } static int ti_gate_attach(device_t dev) { struct ti_gate_softc *sc; phandle_t node; int err; cell_t value; sc = device_get_softc(dev); sc->sc_dev = dev; node = ofw_bus_get_node(dev); /* Get the compatible type */ sc->sc_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Get the content of reg properties */ if (sc->sc_type != TI_CLKDM_GATE_CLOCK) { OF_getencprop(node, "reg", &value, sizeof(value)); sc->gate_def.offset = value; } #if DEBUG_GATE else { DPRINTF(sc->sc_dev, "no reg (TI_CLKDM_GATE_CLOCK)\n"); } #endif if (OF_hasprop(node, "ti,bit-shift")) { OF_getencprop(node, "ti,bit-shift", &value, sizeof(value)); sc->gate_def.shift = value; DPRINTF(sc->sc_dev, "ti,bit-shift => shift %x\n", sc->gate_def.shift); } if (OF_hasprop(node, "ti,set-bit-to-disable")) { sc->gate_def.on_value = 0; sc->gate_def.off_value = 1; DPRINTF(sc->sc_dev, "on_value = 0, off_value = 1 (ti,set-bit-to-disable)\n"); } else { sc->gate_def.on_value = 1; sc->gate_def.off_value = 0; DPRINTF(sc->sc_dev, "on_value = 1, off_value = 0\n"); } sc->gate_def.gate_flags = 0x0; read_clock_cells(sc->sc_dev, &sc->clock_cell); create_clkdef(sc->sc_dev, &sc->clock_cell, &sc->gate_def.clkdef); /* Calculate mask */ sc->gate_def.mask = (1 << 
fls(sc->clock_cell.num_real_clocks)) - 1; DPRINTF(sc->sc_dev, "num_real_clocks %x gate_def.mask %x\n", sc->clock_cell.num_real_clocks, sc->gate_def.mask); err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->gate_def.clkdef); if (err) { /* free_clkdef will be called in ti_gate_new_pass */ DPRINTF(sc->sc_dev, "find_parent_clock_names failed\n"); return (bus_generic_attach(sc->sc_dev)); } err = register_clk(sc); if (err) { /* free_clkdef will be called in ti_gate_new_pass */ DPRINTF(sc->sc_dev, "register_clk failed\n"); return (bus_generic_attach(sc->sc_dev)); } sc->attach_done = true; free_clkdef(&sc->gate_def.clkdef); return (bus_generic_attach(sc->sc_dev)); } static int ti_gate_detach(device_t dev) { return (EBUSY); } static void ti_gate_new_pass(device_t dev) { struct ti_gate_softc *sc; int err; sc = device_get_softc(dev); if (sc->attach_done) { return; } err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->gate_def.clkdef); if (err) { /* free_clkdef will be called in later call to ti_gate_new_pass */ DPRINTF(sc->sc_dev, "new_pass find_parent_clock_names failed\n"); return; } err = register_clk(sc); if (err) { /* free_clkdef will be called in later call to ti_gate_new_pass */ DPRINTF(sc->sc_dev, "new_pass register_clk failed\n"); return; } sc->attach_done = true; free_clkdef(&sc->gate_def.clkdef); } static device_method_t ti_gate_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_gate_probe), DEVMETHOD(device_attach, ti_gate_attach), DEVMETHOD(device_detach, ti_gate_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, ti_gate_new_pass), DEVMETHOD_END }; DEFINE_CLASS_0(ti_gate, ti_gate_driver, ti_gate_methods, sizeof(struct ti_gate_softc)); EARLY_DRIVER_MODULE(ti_gate, simplebus, ti_gate_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_gate, 1); diff --git a/sys/arm/ti/clk/ti_mux_clock.c b/sys/arm/ti/clk/ti_mux_clock.c index 684e95305038..a6c506fedce0 100644 --- a/sys/arm/ti/clk/ti_mux_clock.c +++ 
b/sys/arm/ti/clk/ti_mux_clock.c @@ -1,242 +1,242 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "clock_common.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) 
#endif /* * Devicetree description * Documentation/devicetree/bindings/clock/ti/mux.txt */ struct ti_mux_softc { device_t sc_dev; bool attach_done; struct clk_mux_def mux_def; struct clock_cell_info clock_cell; struct clkdom *clkdom; }; static int ti_mux_probe(device_t dev); static int ti_mux_attach(device_t dev); static int ti_mux_detach(device_t dev); #define TI_MUX_CLOCK 2 #define TI_COMPOSITE_MUX_CLOCK 1 #define TI_MUX_END 0 static struct ofw_compat_data compat_data[] = { { "ti,mux-clock", TI_MUX_CLOCK }, { "ti,composite-mux-clock", TI_COMPOSITE_MUX_CLOCK }, { NULL, TI_MUX_END } }; static int ti_mux_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI Mux Clock"); return (BUS_PROBE_DEFAULT); } static int register_clk(struct ti_mux_softc *sc) { int err; sc->clkdom = clkdom_create(sc->sc_dev); if (sc->clkdom == NULL) { DPRINTF(sc->sc_dev, "Failed to create clkdom\n"); return ENXIO; } err = clknode_mux_register(sc->clkdom, &sc->mux_def); if (err) { DPRINTF(sc->sc_dev, "clknode_mux_register failed %x\n", err); return ENXIO; } err = clkdom_finit(sc->clkdom); if (err) { DPRINTF(sc->sc_dev, "Clk domain finit fails %x.\n", err); return ENXIO; } return 0; } static int ti_mux_attach(device_t dev) { struct ti_mux_softc *sc; phandle_t node; int err; cell_t value; sc = device_get_softc(dev); sc->sc_dev = dev; node = ofw_bus_get_node(dev); /* Grab the content of reg properties */ OF_getencprop(node, "reg", &value, sizeof(value)); sc->mux_def.offset = value; if (OF_hasprop(node, "ti,bit-shift")) { OF_getencprop(node, "ti,bit-shift", &value, sizeof(value)); sc->mux_def.shift = value; DPRINTF(sc->sc_dev, "ti,bit-shift => shift %x\n", sc->mux_def.shift); } if (OF_hasprop(node, "ti,index-starts-at-one")) { /* FIXME: Add support in dev/extres/clk */ /*sc->mux_def.mux_flags = ... 
*/ device_printf(sc->sc_dev, "ti,index-starts-at-one - Not implemented\n"); } if (OF_hasprop(node, "ti,set-rate-parent")) device_printf(sc->sc_dev, "ti,set-rate-parent - Not implemented\n"); if (OF_hasprop(node, "ti,latch-bit")) device_printf(sc->sc_dev, "ti,latch-bit - Not implemented\n"); read_clock_cells(sc->sc_dev, &sc->clock_cell); create_clkdef(sc->sc_dev, &sc->clock_cell, &sc->mux_def.clkdef); /* Figure out the width from ti_max_div */ if (sc->mux_def.mux_flags) sc->mux_def.width = fls(sc->clock_cell.num_real_clocks-1); else sc->mux_def.width = fls(sc->clock_cell.num_real_clocks); DPRINTF(sc->sc_dev, "sc->clock_cell.num_real_clocks %x def.width %x\n", sc->clock_cell.num_real_clocks, sc->mux_def.width); err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->mux_def.clkdef); if (err) { /* free_clkdef will be called in ti_mux_new_pass */ DPRINTF(sc->sc_dev, "find_parent_clock_names failed\n"); return (bus_generic_attach(sc->sc_dev)); } err = register_clk(sc); if (err) { /* free_clkdef will be called in ti_mux_new_pass */ DPRINTF(sc->sc_dev, "register_clk failed\n"); return (bus_generic_attach(sc->sc_dev)); } sc->attach_done = true; free_clkdef(&sc->mux_def.clkdef); return (bus_generic_attach(sc->sc_dev)); } static void ti_mux_new_pass(device_t dev) { struct ti_mux_softc *sc; int err; sc = device_get_softc(dev); if (sc->attach_done) { return; } err = find_parent_clock_names(sc->sc_dev, &sc->clock_cell, &sc->mux_def.clkdef); if (err) { /* free_clkdef will be called in later call to ti_mux_new_pass */ DPRINTF(sc->sc_dev, "ti_mux_new_pass find_parent_clock_names failed\n"); return; } err = register_clk(sc); if (err) { /* free_clkdef will be called in later call to ti_mux_new_pass */ DPRINTF(sc->sc_dev, "ti_mux_new_pass register_clk failed\n"); return; } sc->attach_done = true; free_clkdef(&sc->mux_def.clkdef); } static int ti_mux_detach(device_t dev) { return (EBUSY); } static device_method_t ti_mux_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, ti_mux_probe), DEVMETHOD(device_attach, ti_mux_attach), DEVMETHOD(device_detach, ti_mux_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, ti_mux_new_pass), DEVMETHOD_END }; DEFINE_CLASS_0(ti_mux, ti_mux_driver, ti_mux_methods, sizeof(struct ti_mux_softc)); EARLY_DRIVER_MODULE(ti_mux, simplebus, ti_mux_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_mux, 1); diff --git a/sys/arm/ti/ti_pruss.c b/sys/arm/ti/ti_pruss.c index 378c892c95ec..85d075419fe8 100644 --- a/sys/arm/ti/ti_pruss.c +++ b/sys/arm/ti/ti_pruss.c @@ -1,846 +1,846 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Rui Paulo * Copyright (c) 2017 Manuel Stuehn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #ifdef DEBUG #define DPRINTF(fmt, ...) do { \ printf("%s: ", __func__); \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(fmt, ...) #endif static d_open_t ti_pruss_irq_open; static d_read_t ti_pruss_irq_read; static d_poll_t ti_pruss_irq_poll; static device_probe_t ti_pruss_probe; static device_attach_t ti_pruss_attach; static device_detach_t ti_pruss_detach; static void ti_pruss_intr(void *); static d_open_t ti_pruss_open; static d_mmap_t ti_pruss_mmap; static void ti_pruss_irq_kqread_detach(struct knote *); static int ti_pruss_irq_kqevent(struct knote *, long); static d_kqfilter_t ti_pruss_irq_kqfilter; static void ti_pruss_privdtor(void *data); #define TI_PRUSS_PRU_IRQS 2 #define TI_PRUSS_HOST_IRQS 8 #define TI_PRUSS_IRQS (TI_PRUSS_HOST_IRQS+TI_PRUSS_PRU_IRQS) #define TI_PRUSS_EVENTS 64 #define NOT_SET_STR "NONE" #define TI_TS_ARRAY 16 struct ctl { size_t cnt; size_t idx; }; struct ts_ring_buf { struct ctl ctl; uint64_t ts[TI_TS_ARRAY]; }; struct ti_pruss_irqsc { struct mtx sc_mtx; struct cdev *sc_pdev; struct selinfo sc_selinfo; int8_t channel; int8_t last; int8_t event; bool enable; struct ts_ring_buf tstamps; }; static struct cdevsw ti_pruss_cdevirq = { .d_version = D_VERSION, .d_name = "ti_pruss_irq", .d_open = ti_pruss_irq_open, .d_read = ti_pruss_irq_read, .d_poll = ti_pruss_irq_poll, .d_kqfilter = ti_pruss_irq_kqfilter, }; struct ti_pruss_softc { struct mtx sc_mtx; struct resource *sc_mem_res; struct resource *sc_irq_res[TI_PRUSS_HOST_IRQS]; void *sc_intr[TI_PRUSS_HOST_IRQS]; struct ti_pruss_irqsc sc_irq_devs[TI_PRUSS_IRQS]; bus_space_tag_t sc_bt; bus_space_handle_t sc_bh; struct cdev *sc_pdev; struct selinfo sc_selinfo; bool sc_glob_irqen; }; static 
struct cdevsw ti_pruss_cdevsw = { .d_version = D_VERSION, .d_name = "ti_pruss", .d_open = ti_pruss_open, .d_mmap = ti_pruss_mmap, }; static device_method_t ti_pruss_methods[] = { DEVMETHOD(device_probe, ti_pruss_probe), DEVMETHOD(device_attach, ti_pruss_attach), DEVMETHOD(device_detach, ti_pruss_detach), DEVMETHOD_END }; static driver_t ti_pruss_driver = { "ti_pruss", ti_pruss_methods, sizeof(struct ti_pruss_softc) }; DRIVER_MODULE(ti_pruss, simplebus, ti_pruss_driver, 0, 0); MODULE_DEPEND(ti_pruss, ti_sysc, 1, 1, 1); MODULE_DEPEND(ti_pruss, ti_prm, 1, 1, 1); static struct resource_spec ti_pruss_irq_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { SYS_RES_IRQ, 2, RF_ACTIVE }, { SYS_RES_IRQ, 3, RF_ACTIVE }, { SYS_RES_IRQ, 4, RF_ACTIVE }, { SYS_RES_IRQ, 5, RF_ACTIVE }, { SYS_RES_IRQ, 6, RF_ACTIVE }, { SYS_RES_IRQ, 7, RF_ACTIVE }, { -1, 0, 0 } }; CTASSERT(TI_PRUSS_HOST_IRQS == nitems(ti_pruss_irq_spec) - 1); static int ti_pruss_irq_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct ctl* irqs; struct ti_pruss_irqsc *sc; sc = dev->si_drv1; irqs = malloc(sizeof(struct ctl), M_DEVBUF, M_WAITOK); if (!irqs) return (ENOMEM); irqs->cnt = sc->tstamps.ctl.cnt; irqs->idx = sc->tstamps.ctl.idx; return devfs_set_cdevpriv(irqs, ti_pruss_privdtor); } static void ti_pruss_privdtor(void *data) { free(data, M_DEVBUF); } static int ti_pruss_irq_poll(struct cdev *dev, int events, struct thread *td) { struct ctl* irqs; struct ti_pruss_irqsc *sc; sc = dev->si_drv1; devfs_get_cdevpriv((void**)&irqs); if (events & (POLLIN | POLLRDNORM)) { if (sc->tstamps.ctl.cnt != irqs->cnt) return events & (POLLIN | POLLRDNORM); else selrecord(td, &sc->sc_selinfo); } return 0; } static int ti_pruss_irq_read(struct cdev *cdev, struct uio *uio, int ioflag) { const size_t ts_len = sizeof(uint64_t); struct ti_pruss_irqsc* irq; struct ctl* priv; int error = 0; size_t idx; ssize_t level; irq = cdev->si_drv1; if (uio->uio_resid < ts_len) return (EINVAL); error = 
devfs_get_cdevpriv((void**)&priv);
	if (error)
		return (error);

	mtx_lock(&irq->sc_mtx);
	/*
	 * If the reader fell more than one ring-buffer length behind the
	 * interrupt handler, unread timestamps have been overwritten.
	 * Resynchronize the per-open cursor with the writer and report
	 * the overflow to the caller.
	 */
	if (irq->tstamps.ctl.cnt - priv->cnt > TI_TS_ARRAY) {
		priv->cnt = irq->tstamps.ctl.cnt;
		priv->idx = irq->tstamps.ctl.idx;
		mtx_unlock(&irq->sc_mtx);
		return (ENXIO);
	}

	do {
		idx = priv->idx;
		/* Number of unread entries between reader and writer. */
		level = irq->tstamps.ctl.idx - idx;
		if (level < 0)
			level += TI_TS_ARRAY;

		if (level == 0) {
			if (ioflag & O_NONBLOCK) {
				mtx_unlock(&irq->sc_mtx);
				return (EWOULDBLOCK);
			}

			/*
			 * PDROP releases sc_mtx while sleeping; the handler
			 * wakes us via wakeup(irq) after stamping an entry.
			 * On signal/error the mutex is already dropped.
			 */
			error = msleep(irq, &irq->sc_mtx, PCATCH | PDROP,
			    "pruirq", 0);
			if (error)
				return error;

			mtx_lock(&irq->sc_mtx);
		}
	}while(level == 0);

	mtx_unlock(&irq->sc_mtx);

	/* Copy one 64-bit nanosecond timestamp out to userland. */
	error = uiomove(&irq->tstamps.ts[idx], ts_len, uio);

	if (++idx == TI_TS_ARRAY)
		idx = 0;
	priv->idx = idx;
	atomic_add_32(&priv->cnt, 1);
	return (error);
}

/* Per-host-interrupt argument handed to ti_pruss_intr(). */
static struct ti_pruss_irq_arg {
	int irq;
	struct ti_pruss_softc *sc;
} ti_pruss_irq_args[TI_PRUSS_IRQS];

/* Read a 32-bit PRUSS register. */
static __inline uint32_t
ti_pruss_reg_read(struct ti_pruss_softc *sc, uint32_t reg)
{
	return (bus_space_read_4(sc->sc_bt, sc->sc_bh, reg));
}

/* Write a 32-bit PRUSS register. */
static __inline void
ti_pruss_reg_write(struct ti_pruss_softc *sc, uint32_t reg, uint32_t val)
{
	bus_space_write_4(sc->sc_bt, sc->sc_bh, reg, val);
}

/*
 * Quiesce the PRUSS interrupt controller: mask the global enable,
 * acknowledge all pending system events, and mask every host interrupt.
 */
static __inline void
ti_pruss_interrupts_clear(struct ti_pruss_softc *sc)
{
	/* disable global interrupt */
	ti_pruss_reg_write(sc, PRUSS_INTC_GER, 0 );

	/* clear all events */
	ti_pruss_reg_write(sc, PRUSS_INTC_SECR0, 0xFFFFFFFF);
	ti_pruss_reg_write(sc, PRUSS_INTC_SECR1, 0xFFFFFFFF);

	/* disable all host interrupts */
	ti_pruss_reg_write(sc, PRUSS_INTC_HIER, 0);
}

/*
 * Enable or disable one host interrupt and create or destroy its
 * /dev/prussN.irqN character device.  The event -> channel -> host
 * mapping must already be configured before enabling is allowed.
 */
static __inline int
ti_pruss_interrupts_enable(struct ti_pruss_softc *sc, int8_t irq,
    bool enable)
{
	if (enable && ((sc->sc_irq_devs[irq].channel == -1) ||
	    (sc->sc_irq_devs[irq].event== -1))) {
		device_printf( sc->sc_pdev->si_drv1,
		    "Interrupt chain not fully configured, not possible to enable\n" );
		return (EINVAL);
	}

	sc->sc_irq_devs[irq].enable = enable;

	/* Drop any stale cdev before (possibly) recreating it below. */
	if (sc->sc_irq_devs[irq].sc_pdev) {
		destroy_dev(sc->sc_irq_devs[irq].sc_pdev);
		sc->sc_irq_devs[irq].sc_pdev = NULL;
	}

	if
(enable) { sc->sc_irq_devs[irq].sc_pdev = make_dev(&ti_pruss_cdevirq, 0, UID_ROOT, GID_WHEEL, 0600, "pruss%d.irq%d", device_get_unit(sc->sc_pdev->si_drv1), irq); sc->sc_irq_devs[irq].sc_pdev->si_drv1 = &sc->sc_irq_devs[irq]; sc->sc_irq_devs[irq].tstamps.ctl.idx = 0; } uint32_t reg = enable ? PRUSS_INTC_HIEISR : PRUSS_INTC_HIDISR; ti_pruss_reg_write(sc, reg, sc->sc_irq_devs[irq].channel); reg = enable ? PRUSS_INTC_EISR : PRUSS_INTC_EICR; ti_pruss_reg_write(sc, reg, sc->sc_irq_devs[irq].event ); return (0); } static __inline void ti_pruss_map_write(struct ti_pruss_softc *sc, uint32_t basereg, uint8_t index, uint8_t content) { const size_t regadr = basereg + index & ~0x03; const size_t bitpos = (index & 0x03) * 8; uint32_t rmw = ti_pruss_reg_read(sc, regadr); rmw = (rmw & ~( 0xF << bitpos)) | ( (content & 0xF) << bitpos); ti_pruss_reg_write(sc, regadr, rmw); } static int ti_pruss_event_map( SYSCTL_HANDLER_ARGS ) { struct ti_pruss_softc *sc; const int8_t irq = arg2; int err; char event[sizeof(NOT_SET_STR)]; sc = arg1; if(sc->sc_irq_devs[irq].event == -1) bcopy(NOT_SET_STR, event, sizeof(event)); else snprintf(event, sizeof(event), "%d", sc->sc_irq_devs[irq].event); err = sysctl_handle_string(oidp, event, sizeof(event), req); if(err != 0) return (err); if (req->newptr) { // write event if (strcmp(NOT_SET_STR, event) == 0) { ti_pruss_interrupts_enable(sc, irq, false); sc->sc_irq_devs[irq].event = -1; } else { if (sc->sc_irq_devs[irq].channel == -1) { device_printf( sc->sc_pdev->si_drv1, "corresponding channel not configured\n"); return (ENXIO); } const int8_t channelnr = sc->sc_irq_devs[irq].channel; const int8_t eventnr = strtol( event, NULL, 10 ); // TODO: check if strol is valid if (eventnr > TI_PRUSS_EVENTS || eventnr < 0) { device_printf( sc->sc_pdev->si_drv1, "Event number %d not valid (0 - %d)", channelnr, TI_PRUSS_EVENTS -1); return (EINVAL); } sc->sc_irq_devs[irq].channel = channelnr; sc->sc_irq_devs[irq].event = eventnr; // event[nr] <= channel 
ti_pruss_map_write(sc, PRUSS_INTC_CMR_BASE, eventnr, channelnr); } } return (err); } static int ti_pruss_channel_map(SYSCTL_HANDLER_ARGS) { struct ti_pruss_softc *sc; int err; char channel[sizeof(NOT_SET_STR)]; const int8_t irq = arg2; sc = arg1; if (sc->sc_irq_devs[irq].channel == -1) bcopy(NOT_SET_STR, channel, sizeof(channel)); else snprintf(channel, sizeof(channel), "%d", sc->sc_irq_devs[irq].channel); err = sysctl_handle_string(oidp, channel, sizeof(channel), req); if (err != 0) return (err); if (req->newptr) { // write event if (strcmp(NOT_SET_STR, channel) == 0) { ti_pruss_interrupts_enable(sc, irq, false); ti_pruss_reg_write(sc, PRUSS_INTC_HIDISR, sc->sc_irq_devs[irq].channel); sc->sc_irq_devs[irq].channel = -1; } else { const int8_t channelnr = strtol(channel, NULL, 10); // TODO: check if strol is valid if (channelnr > TI_PRUSS_IRQS || channelnr < 0) { device_printf(sc->sc_pdev->si_drv1, "Channel number %d not valid (0 - %d)", channelnr, TI_PRUSS_IRQS-1); return (EINVAL); } sc->sc_irq_devs[irq].channel = channelnr; sc->sc_irq_devs[irq].last = -1; // channel[nr] <= irqnr ti_pruss_map_write(sc, PRUSS_INTC_HMR_BASE, irq, channelnr); } } return (err); } static int ti_pruss_interrupt_enable(SYSCTL_HANDLER_ARGS) { struct ti_pruss_softc *sc; int err; bool irqenable; const int8_t irq = arg2; sc = arg1; irqenable = sc->sc_irq_devs[arg2].enable; err = sysctl_handle_bool(oidp, &irqenable, arg2, req); if (err != 0) return (err); if (req->newptr) // write enable return ti_pruss_interrupts_enable(sc, irq, irqenable); return (err); } static int ti_pruss_global_interrupt_enable(SYSCTL_HANDLER_ARGS) { struct ti_pruss_softc *sc; int err; bool glob_irqen; sc = arg1; glob_irqen = sc->sc_glob_irqen; err = sysctl_handle_bool(oidp, &glob_irqen, arg2, req); if (err != 0) return (err); if (req->newptr) { sc->sc_glob_irqen = glob_irqen; ti_pruss_reg_write(sc, PRUSS_INTC_GER, glob_irqen); } return (err); } static int ti_pruss_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) 
return (ENXIO); if (ofw_bus_is_compatible(dev, "ti,pruss-v1") || ofw_bus_is_compatible(dev, "ti,pruss-v2")) { device_set_desc(dev, "TI Programmable Realtime Unit Subsystem"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ti_pruss_attach(device_t dev) { struct ti_pruss_softc *sc; int rid, i, err, ncells; phandle_t node; clk_t l3_gclk, pruss_ocp_gclk; phandle_t ti_prm_ref, *cells; device_t ti_prm_dev; rid = 0; sc = device_get_softc(dev); node = ofw_bus_get_node(device_get_parent(dev)); if (node <= 0) { device_printf(dev, "Cant get ofw node\n"); return (ENXIO); } /* * Follow activate pattern from sys/arm/ti/am335x/am335x_prcm.c * by Damjan Marion */ /* Set MODULEMODE to ENABLE(2) */ /* Wait for MODULEMODE to become ENABLE(2) */ if (ti_sysc_clock_enable(device_get_parent(dev)) != 0) { device_printf(dev, "Could not enable PRUSS clock\n"); return (ENXIO); } /* Set CLKTRCTRL to SW_WKUP(2) */ /* Wait for the 200 MHz OCP clock to become active */ /* Wait for the 200 MHz IEP clock to become active */ /* Wait for the 192 MHz UART clock to become active */ /* * At the moment there is no reference to CM_PER_PRU_ICSS_CLKSTCTRL@140 * in the devicetree. The register reset state are SW_WKUP(2) as default * so at the moment ignore setting this register. */ /* Select L3F as OCP clock */ /* Get the clock and set the parent */ err = clk_get_by_name(dev, "l3_gclk", &l3_gclk); if (err) { device_printf(dev, "Cant get l3_gclk err %d\n", err); return (ENXIO); } err = clk_get_by_name(dev, "pruss_ocp_gclk@530", &pruss_ocp_gclk); if (err) { device_printf(dev, "Cant get pruss_ocp_gclk@530 err %d\n", err); return (ENXIO); } err = clk_set_parent_by_clk(pruss_ocp_gclk, l3_gclk); if (err) { device_printf(dev, "Cant set pruss_ocp_gclk parent to l3_gclk err %d\n", err); return (ENXIO); } /* Clear the RESET bit */ /* Find the ti_prm */ /* #reset-cells should not been used in this way but... 
*/ err = ofw_bus_parse_xref_list_alloc(node, "resets", "#reset-cells", 0, &ti_prm_ref, &ncells, &cells); OF_prop_free(cells); if (err) { device_printf(dev, "Cant fetch \"resets\" reference %x\n", err); return (ENXIO); } ti_prm_dev = OF_device_from_xref(ti_prm_ref); if (ti_prm_dev == NULL) { device_printf(dev, "Cant get device from \"resets\"\n"); return (ENXIO); } err = ti_prm_reset(ti_prm_dev); if (err) { device_printf(dev, "ti_prm_reset failed %d\n", err); return (ENXIO); } /* End of clock activation */ mtx_init(&sc->sc_mtx, "TI PRUSS", NULL, MTX_DEF); sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } struct sysctl_ctx_list *clist = device_get_sysctl_ctx(dev); if (!clist) return (EINVAL); struct sysctl_oid *poid; poid = device_get_sysctl_tree( dev ); if (!poid) return (EINVAL); sc->sc_glob_irqen = false; struct sysctl_oid *irq_root = SYSCTL_ADD_NODE(clist, SYSCTL_CHILDREN(poid), OID_AUTO, "irq", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PRUSS Host Interrupts"); SYSCTL_ADD_PROC(clist, SYSCTL_CHILDREN(poid), OID_AUTO, "global_interrupt_enable", CTLFLAG_RW | CTLTYPE_U8 | CTLFLAG_NEEDGIANT, sc, 0, ti_pruss_global_interrupt_enable, "CU", "Global interrupt enable"); sc->sc_bt = rman_get_bustag(sc->sc_mem_res); sc->sc_bh = rman_get_bushandle(sc->sc_mem_res); if (bus_alloc_resources(dev, ti_pruss_irq_spec, sc->sc_irq_res) != 0) { device_printf(dev, "could not allocate interrupt resource\n"); ti_pruss_detach(dev); return (ENXIO); } ti_pruss_interrupts_clear(sc); for (i = 0; i < TI_PRUSS_IRQS; i++) { char name[8]; snprintf(name, sizeof(name), "%d", i); struct sysctl_oid *irq_nodes = SYSCTL_ADD_NODE(clist, SYSCTL_CHILDREN(irq_root), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PRUSS Interrupts"); SYSCTL_ADD_PROC(clist, SYSCTL_CHILDREN(irq_nodes), OID_AUTO, "channel", CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, sc, i, ti_pruss_channel_map, 
"A", "Channel attached to this irq"); SYSCTL_ADD_PROC(clist, SYSCTL_CHILDREN(irq_nodes), OID_AUTO, "event", CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, sc, i, ti_pruss_event_map, "A", "Event attached to this irq"); SYSCTL_ADD_PROC(clist, SYSCTL_CHILDREN(irq_nodes), OID_AUTO, "enable", CTLFLAG_RW | CTLTYPE_U8 | CTLFLAG_NEEDGIANT, sc, i, ti_pruss_interrupt_enable, "CU", "Enable/Disable interrupt"); sc->sc_irq_devs[i].event = -1; sc->sc_irq_devs[i].channel = -1; sc->sc_irq_devs[i].tstamps.ctl.idx = 0; if (i < TI_PRUSS_HOST_IRQS) { ti_pruss_irq_args[i].irq = i; ti_pruss_irq_args[i].sc = sc; if (bus_setup_intr(dev, sc->sc_irq_res[i], INTR_MPSAFE | INTR_TYPE_MISC, NULL, ti_pruss_intr, &ti_pruss_irq_args[i], &sc->sc_intr[i]) != 0) { device_printf(dev, "unable to setup the interrupt handler\n"); ti_pruss_detach(dev); return (ENXIO); } mtx_init(&sc->sc_irq_devs[i].sc_mtx, "TI PRUSS IRQ", NULL, MTX_DEF); knlist_init_mtx(&sc->sc_irq_devs[i].sc_selinfo.si_note, &sc->sc_irq_devs[i].sc_mtx); } } if (ti_pruss_reg_read(sc, PRUSS_AM33XX_INTC) == PRUSS_AM33XX_REV) device_printf(dev, "AM33xx PRU-ICSS\n"); sc->sc_pdev = make_dev(&ti_pruss_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "pruss%d", device_get_unit(dev)); sc->sc_pdev->si_drv1 = dev; /* Acc. to datasheet always write 1 to polarity registers */ ti_pruss_reg_write(sc, PRUSS_INTC_SIPR0, 0xFFFFFFFF); ti_pruss_reg_write(sc, PRUSS_INTC_SIPR1, 0xFFFFFFFF); /* Acc. 
to datasheet always write 0 to event type registers */ ti_pruss_reg_write(sc, PRUSS_INTC_SITR0, 0); ti_pruss_reg_write(sc, PRUSS_INTC_SITR1, 0); return (0); } static int ti_pruss_detach(device_t dev) { struct ti_pruss_softc *sc = device_get_softc(dev); ti_pruss_interrupts_clear(sc); for (int i = 0; i < TI_PRUSS_HOST_IRQS; i++) { ti_pruss_interrupts_enable( sc, i, false ); if (sc->sc_intr[i]) bus_teardown_intr(dev, sc->sc_irq_res[i], sc->sc_intr[i]); if (sc->sc_irq_res[i]) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->sc_irq_res[i]), sc->sc_irq_res[i]); knlist_clear(&sc->sc_irq_devs[i].sc_selinfo.si_note, 0); mtx_lock(&sc->sc_irq_devs[i].sc_mtx); if (!knlist_empty(&sc->sc_irq_devs[i].sc_selinfo.si_note)) printf("IRQ %d KQueue not empty!\n", i ); mtx_unlock(&sc->sc_irq_devs[i].sc_mtx); knlist_destroy(&sc->sc_irq_devs[i].sc_selinfo.si_note); mtx_destroy(&sc->sc_irq_devs[i].sc_mtx); } mtx_destroy(&sc->sc_mtx); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_mem_res), sc->sc_mem_res); if (sc->sc_pdev) destroy_dev(sc->sc_pdev); return (0); } static void ti_pruss_intr(void *arg) { int val; struct ti_pruss_irq_arg *iap = arg; struct ti_pruss_softc *sc = iap->sc; /* * Interrupts pr1_host_intr[0:7] are mapped to * Host-2 to Host-9 of PRU-ICSS IRQ-controller. 
 */
	const int pru_int = iap->irq + TI_PRUSS_PRU_IRQS;
	const int pru_int_mask = (1 << pru_int);
	const int pru_channel = sc->sc_irq_devs[pru_int].channel;
	/*
	 * NOTE(review): the event is looked up with the channel number as
	 * the array index (not pru_int), matching the channel-indexed
	 * bookkeeping below -- confirm this indexing is intentional.
	 */
	const int pru_event = sc->sc_irq_devs[pru_channel].event;

	val = ti_pruss_reg_read(sc, PRUSS_INTC_HIER);
	/* Ignore interrupts for hosts that are not enabled. */
	if (!(val & pru_int_mask))
		return;

	/* Mask the host irq, ack the triggering event, then re-enable. */
	ti_pruss_reg_write(sc, PRUSS_INTC_HIDISR, pru_int);
	ti_pruss_reg_write(sc, PRUSS_INTC_SICR, pru_event);
	ti_pruss_reg_write(sc, PRUSS_INTC_HIEISR, pru_int);

	struct ti_pruss_irqsc* irq = &sc->sc_irq_devs[pru_channel];
	size_t wr = irq->tstamps.ctl.idx;

	/* Stamp the ring buffer with the current uptime in nanoseconds. */
	struct timespec ts;
	nanouptime(&ts);
	irq->tstamps.ts[wr] = ts.tv_sec * 1000000000 + ts.tv_nsec;

	if (++wr == TI_TS_ARRAY)
		wr = 0;
	atomic_add_32(&irq->tstamps.ctl.cnt, 1);
	irq->tstamps.ctl.idx = wr;

	/* Notify kqueue, sleeping readers, and poll/select waiters. */
	KNOTE_UNLOCKED(&irq->sc_selinfo.si_note, pru_int);
	wakeup(irq);
	selwakeup(&irq->sc_selinfo);
}

/* /dev/prussN open: no per-open state is needed. */
static int
ti_pruss_open(struct cdev *cdev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	return (0);
}

/* Map the PRUSS register window into userland, uncached. */
static int
ti_pruss_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	device_t dev = cdev->si_drv1;
	struct ti_pruss_softc *sc = device_get_softc(dev);

	if (offset >= rman_get_size(sc->sc_mem_res))
		return (ENOSPC);
	*paddr = rman_get_start(sc->sc_mem_res) + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;

	return (0);
}

/* kqueue filter ops for the per-irq character devices. */
static struct filterops ti_pruss_kq_read = {
	.f_isfd = 1,
	.f_detach = ti_pruss_irq_kqread_detach,
	.f_event = ti_pruss_irq_kqevent,
};

static void
ti_pruss_irq_kqread_detach(struct knote *kn)
{
	struct ti_pruss_irqsc *sc = kn->kn_hook;

	knlist_remove(&sc->sc_selinfo.si_note, kn, 0);
}

/*
 * kqueue event filter.  "hint" is the host-interrupt number passed by
 * KNOTE_UNLOCKED() from ti_pruss_intr(), or 0 on a plain re-scan.
 */
static int
ti_pruss_irq_kqevent(struct knote *kn, long hint)
{
	struct ti_pruss_irqsc* irq_sc;
	int notify;

	irq_sc = kn->kn_hook;

	if (hint > 0)
		/* presumably pr1_host_intr index = hint - 2 -- TODO confirm */
		kn->kn_data = hint - 2;

	/* Report readable if this or the previous scan saw an event. */
	if (hint > 0 || irq_sc->last > 0)
		notify = 1;
	else
		notify = 0;

	irq_sc->last = hint;

	return (notify);
}

static int
ti_pruss_irq_kqfilter(struct cdev *cdev, struct knote *kn)
{
	struct ti_pruss_irqsc *sc =
cdev->si_drv1; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_hook = sc; kn->kn_fop = &ti_pruss_kq_read; knlist_add(&sc->sc_selinfo.si_note, kn, 0); break; default: return (EINVAL); } return (0); } diff --git a/sys/arm/ti/ti_sdhci.c b/sys/arm/ti/ti_sdhci.c index 0c01693c3c52..29035fee77c9 100644 --- a/sys/arm/ti/ti_sdhci.c +++ b/sys/arm/ti/ti_sdhci.c @@ -1,765 +1,765 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Ian Lepore * Copyright (c) 2011 Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" -#include +#include #include #include #include #include #include #include #include #include "sdhci_if.h" #include #include #include #include "opt_mmccam.h" struct ti_sdhci_softc { device_t dev; struct sdhci_fdt_gpio * gpio; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; struct sdhci_slot slot; uint32_t mmchs_reg_off; uint32_t sdhci_reg_off; uint64_t baseclk_hz; uint32_t cmd_and_mode; uint32_t sdhci_clkdiv; boolean_t disable_highspeed; boolean_t force_card_present; boolean_t disable_readonly; }; /* * Table of supported FDT compat strings. * * Note that "ti,mmchs" is our own invention, and should be phased out in favor * of the documented names. * * Note that vendor Beaglebone dtsi files use "ti,omap3-hsmmc" for the am335x. */ static struct ofw_compat_data compat_data[] = { {"ti,am335-sdhci", 1}, {"ti,omap3-hsmmc", 1}, {"ti,omap4-hsmmc", 1}, {"ti,mmchs", 1}, {NULL, 0}, }; /* * The MMCHS hardware has a few control and status registers at the beginning of * the device's memory map, followed by the standard sdhci register block. * Different SoCs have the register blocks at different offsets from the * beginning of the device. Define some constants to map out the registers we * access, and the various per-SoC offsets. The SDHCI_REG_OFFSET is how far * beyond the MMCHS block the SDHCI block is found; it's the same on all SoCs. 
*/ #define OMAP3_MMCHS_REG_OFFSET 0x000 #define OMAP4_MMCHS_REG_OFFSET 0x100 #define AM335X_MMCHS_REG_OFFSET 0x100 #define SDHCI_REG_OFFSET 0x100 #define MMCHS_SYSCONFIG 0x010 #define MMCHS_SYSCONFIG_RESET (1 << 1) #define MMCHS_SYSSTATUS 0x014 #define MMCHS_SYSSTATUS_RESETDONE (1 << 0) #define MMCHS_CON 0x02C #define MMCHS_CON_DW8 (1 << 5) #define MMCHS_CON_DVAL_8_4MS (3 << 9) #define MMCHS_CON_OD (1 << 0) #define MMCHS_SYSCTL 0x12C #define MMCHS_SYSCTL_CLKD_MASK 0x3FF #define MMCHS_SYSCTL_CLKD_SHIFT 6 #define MMCHS_SD_CAPA 0x140 #define MMCHS_SD_CAPA_VS18 (1 << 26) #define MMCHS_SD_CAPA_VS30 (1 << 25) #define MMCHS_SD_CAPA_VS33 (1 << 24) /* Forward declarations, CAM-relataed */ // static void ti_sdhci_cam_poll(struct cam_sim *); // static void ti_sdhci_cam_action(struct cam_sim *, union ccb *); // static int ti_sdhci_cam_settran_settings(struct ti_sdhci_softc *sc, union ccb *); static inline uint32_t ti_mmchs_read_4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->mmchs_reg_off)); } static inline void ti_mmchs_write_4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->mmchs_reg_off, val); } static inline uint32_t RD4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->sdhci_reg_off)); } static inline void WR4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->sdhci_reg_off, val); } static uint8_t ti_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xff); } static uint16_t ti_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * The MMCHS hardware has a non-standard interpretation of the sdclock * divisor bits. 
It uses the same bit positions as SDHCI 3.0 (15..6) * but doesn't split them into low:high fields. Instead they're a * single number in the range 0..1023 and the number is exactly the * clock divisor (with 0 and 1 both meaning divide by 1). The SDHCI * driver code expects a v2.0 or v3.0 divisor. The shifting and masking * here extracts the MMCHS representation from the hardware word, cleans * those bits out, applies the 2N adjustment, and plugs the result into * the bit positions for the 2.0 or 3.0 divisor in the returned register * value. The ti_sdhci_write_2() routine performs the opposite * transformation when the SDHCI driver writes to the register. */ if (off == SDHCI_CLOCK_CONTROL) { val32 = RD4(sc, SDHCI_CLOCK_CONTROL); clkdiv = ((val32 >> MMCHS_SYSCTL_CLKD_SHIFT) & MMCHS_SYSCTL_CLKD_MASK) / 2; val32 &= ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= (clkdiv & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; if (slot->version >= SDHCI_SPEC_300) val32 |= ((clkdiv >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_HI_SHIFT; return (val32 & 0xffff); } /* * Standard 32-bit handling of command and transfer mode. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode >> 16); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode & 0x0000ffff); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xffff); } static uint32_t ti_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; val32 = RD4(sc, off); /* * If we need to disallow highspeed mode due to the OMAP4 erratum, strip * that flag from the returned capabilities. */ if (off == SDHCI_CAPABILITIES && sc->disable_highspeed) val32 &= ~SDHCI_CAN_DO_HISPD; /* * Force the card-present state if necessary. 
*/ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void ti_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; #ifdef MMCCAM uint32_t newval32; if (off == SDHCI_HOST_CONTROL) { val32 = ti_mmchs_read_4(sc, MMCHS_CON); newval32 = val32; if (val & SDHCI_CTRL_8BITBUS) { device_printf(dev, "Custom-enabling 8-bit bus\n"); newval32 |= MMCHS_CON_DW8; } else { device_printf(dev, "Custom-disabling 8-bit bus\n"); newval32 &= ~MMCHS_CON_DW8; } if (newval32 != val32) ti_mmchs_write_4(sc, MMCHS_CON, newval32); } #endif val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3) * 8); val32 |= (val << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * Translate between the hardware and SDHCI 2.0 or 3.0 representations * of the clock divisor. See the comments in ti_sdhci_read_2() for * details. */ if (off == SDHCI_CLOCK_CONTROL) { clkdiv = (val >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; if (slot->version >= SDHCI_SPEC_300) clkdiv |= ((val >> SDHCI_DIVIDER_HI_SHIFT) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_MASK_LEN; clkdiv *= 2; if (clkdiv > MMCHS_SYSCTL_CLKD_MASK) clkdiv = MMCHS_SYSCTL_CLKD_MASK; val32 = RD4(sc, SDHCI_CLOCK_CONTROL); val32 &= 0xffff0000; val32 |= val & ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= clkdiv << MMCHS_SYSCTL_CLKD_SHIFT; WR4(sc, SDHCI_CLOCK_CONTROL, val32); return; } /* * Standard 32-bit handling of command and transfer mode. 
*/ if (off == SDHCI_TRANSFER_MODE) { sc->cmd_and_mode = (sc->cmd_and_mode & 0xffff0000) | ((uint32_t)val & 0x0000ffff); return; } else if (off == SDHCI_COMMAND_FLAGS) { sc->cmd_and_mode = (sc->cmd_and_mode & 0x0000ffff) | ((uint32_t)val << 16); WR4(sc, SDHCI_TRANSFER_MODE, sc->cmd_and_mode); return; } val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3) * 8); val32 |= ((val & 0xffff) << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); WR4(sc, off, val); } static void ti_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_intr(void *arg) { struct ti_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); } static int ti_sdhci_update_ios(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); struct sdhci_slot *slot; struct mmc_ios *ios; uint32_t val32, newval32; slot = device_get_ivars(reqdev); ios = &slot->host.ios; /* * There is an 8-bit-bus bit in the MMCHS control register which, when * set, overrides the 1 vs 4 bit setting in the standard SDHCI * registers. Set that bit first according to whether an 8-bit bus is * requested, then let the standard driver handle everything else. 
*/ val32 = ti_mmchs_read_4(sc, MMCHS_CON); newval32 = val32; if (ios->bus_width == bus_width_8) newval32 |= MMCHS_CON_DW8; else newval32 &= ~MMCHS_CON_DW8; if (ios->bus_mode == opendrain) newval32 |= MMCHS_CON_OD; else /* if (ios->bus_mode == pushpull) */ newval32 &= ~MMCHS_CON_OD; if (newval32 != val32) ti_mmchs_write_4(sc, MMCHS_CON, newval32); return (sdhci_generic_update_ios(brdev, reqdev)); } static int ti_sdhci_get_ro(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); if (sc->disable_readonly) return (0); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool ti_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct ti_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } static int ti_sdhci_detach(device_t dev) { /* sdhci_fdt_gpio_teardown(sc->gpio); */ return (EBUSY); } static int ti_sdhci_hw_init(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t regval; unsigned long timeout; clk_t mmc_clk; int err; /* Enable the controller and interface/functional clocks */ if (ti_sysc_clock_enable(device_get_parent(dev)) != 0) { device_printf(dev, "Error: failed to enable MMC clock\n"); return (ENXIO); } /* FIXME: Devicetree dosent have any reference to mmc_clk */ err = clk_get_by_name(dev, "mmc_clk", &mmc_clk); if (err) { device_printf(dev, "Can not find mmc_clk\n"); return (ENXIO); } err = clk_get_freq(mmc_clk, &sc->baseclk_hz); if (err) { device_printf(dev, "Cant get mmc_clk frequency\n"); /* AM335x TRM 8.1.6.8 table 8-24 96MHz @ OPP100 */ sc->baseclk_hz = 96000000; } /* Issue a softreset to the controller */ ti_mmchs_write_4(sc, MMCHS_SYSCONFIG, MMCHS_SYSCONFIG_RESET); timeout = 1000; while (!(ti_mmchs_read_4(sc, MMCHS_SYSSTATUS) & MMCHS_SYSSTATUS_RESETDONE)) { if (--timeout == 0) { device_printf(dev, "Error: Controller reset operation timed out\n"); break; } DELAY(100); } /* * Reset the command and data state machines and also other aspects of * 
the controller such as bus clock and power. * * If we read the software reset register too fast after writing it we * can get back a zero that means the reset hasn't started yet rather * than that the reset is complete. Per TI recommendations, work around * it by reading until we see the reset bit asserted, then read until * it's clear. We also set the SDHCI_QUIRK_WAITFOR_RESET_ASSERTED quirk * so that the main sdhci driver uses this same logic in its resets. */ ti_sdhci_write_1(dev, NULL, SDHCI_SOFTWARE_RESET, SDHCI_RESET_ALL); timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL) != SDHCI_RESET_ALL) { if (--timeout == 0) { break; } DELAY(1); } timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL)) { if (--timeout == 0) { device_printf(dev, "Error: Software reset operation timed out\n"); break; } DELAY(100); } /* * The attach() routine has examined fdt data and set flags in * slot.host.caps to reflect what voltages we can handle. Set those * values in the CAPA register. Empirical testing shows that the * values in this register can be overwritten at any time, but the * manual says that these values should only be set once, "before * initialization" whatever that means, and that they survive a reset. */ regval = ti_mmchs_read_4(sc, MMCHS_SD_CAPA); if (sc->slot.host.caps & MMC_OCR_LOW_VOLTAGE) regval |= MMCHS_SD_CAPA_VS18; if (sc->slot.host.caps & (MMC_OCR_290_300 | MMC_OCR_300_310)) regval |= MMCHS_SD_CAPA_VS30; ti_mmchs_write_4(sc, MMCHS_SD_CAPA, regval); /* Set initial host configuration (1-bit, std speed, pwr off). */ ti_sdhci_write_1(dev, NULL, SDHCI_HOST_CONTROL, 0); ti_sdhci_write_1(dev, NULL, SDHCI_POWER_CONTROL, 0); /* Set the initial controller configuration. 
*/ ti_mmchs_write_4(sc, MMCHS_CON, MMCHS_CON_DVAL_8_4MS); return (0); } static int ti_sdhci_attach(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); int rid, err; pcell_t prop; phandle_t node; sc->dev = dev; /* * Get the MMCHS device id from FDT. Use rev address to identify the unit. */ node = ofw_bus_get_node(dev); /* * The hardware can inherently do dual-voltage (1p8v, 3p0v) on the first * device, and only 1p8v on other devices unless an external transceiver * is used. The only way we could know about a transceiver is fdt data. * Note that we have to do this before calling ti_sdhci_hw_init() so * that it can set the right values in the CAPA register. */ sc->slot.host.caps |= MMC_OCR_LOW_VOLTAGE; if (OF_hasprop(node, "ti,dual-volt")) { sc->slot.host.caps |= MMC_OCR_290_300 | MMC_OCR_300_310; } /* * Set the offset from the device's memory start to the MMCHS registers. * Also for OMAP4 disable high speed mode due to erratum ID i626. */ switch (ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: sc->mmchs_reg_off = OMAP4_MMCHS_REG_OFFSET; sc->disable_highspeed = true; break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: sc->mmchs_reg_off = AM335X_MMCHS_REG_OFFSET; break; #endif default: panic("Unknown OMAP device\n"); } /* * The standard SDHCI registers are at a fixed offset (the same on all * SoCs) beyond the MMCHS registers. */ sc->sdhci_reg_off = sc->mmchs_reg_off + SDHCI_REG_OFFSET; /* Resource setup. 
*/ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, ti_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } /* * Set up handling of card-detect and write-protect gpio lines. * * If there is no write protect info in the fdt data, fall back to the * historical practice of assuming that the card is writable. This * works around bad fdt data from the upstream source. The alternative * would be to trust the sdhci controller's PRESENT_STATE register WP * bit, but it may say write protect is in effect when it's not if the * pinmux setup doesn't route the WP signal into the sdchi block. */ sc->gpio = sdhci_fdt_gpio_setup(sc->dev, &sc->slot); if (!OF_hasprop(node, "wp-gpios") && !OF_hasprop(node, "wp-disable")) sc->disable_readonly = true; /* Initialise the MMCHS hardware. */ err = ti_sdhci_hw_init(dev); if (err != 0) { /* err should already contain ENXIO from ti_sdhci_hw_init() */ goto fail; } /* * The capabilities register can only express base clock frequencies in * the range of 0-63MHz for a v2.0 controller. Since our clock runs * faster than that, the hardware sets the frequency to zero in the * register. When the register contains zero, the sdhci driver expects * slot.max_clk to already have the right value in it. */ sc->slot.max_clk = sc->baseclk_hz; /* * The MMCHS timeout counter is based on the output sdclock. Tell the * sdhci driver to recalculate the timeout clock whenever the output * sdclock frequency changes. 
*/ sc->slot.quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; /* * The MMCHS hardware shifts the 136-bit response data (in violation of * the spec), so tell the sdhci driver not to do the same in software. */ sc->slot.quirks |= SDHCI_QUIRK_DONT_SHIFT_RESPONSE; /* * Reset bits are broken, have to wait to see the bits asserted * before waiting to see them de-asserted. */ sc->slot.quirks |= SDHCI_QUIRK_WAITFOR_RESET_ASSERTED; /* * The controller waits for busy responses. */ sc->slot.quirks |= SDHCI_QUIRK_WAIT_WHILE_BUSY; /* * DMA is not really broken, I just haven't implemented it yet. */ sc->slot.quirks |= SDHCI_QUIRK_BROKEN_DMA; /* * Set up the hardware and go. Note that this sets many of the * slot.host.* fields, so we have to do this before overriding any of * those values based on fdt data, below. */ sdhci_init_slot(dev, &sc->slot, 0); /* * The SDHCI controller doesn't realize it, but we can support 8-bit * even though we're not a v3.0 controller. If there's an fdt bus-width * property, honor it. */ if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->slot.host.caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->slot.host.caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->slot.host.caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } /* * If the slot is flagged with the non-removable property, set our flag * to always force the SDHCI_CARD_PRESENT bit on. 
*/ node = ofw_bus_get_node(dev); if (OF_hasprop(node, "non-removable")) sc->force_card_present = true; bus_generic_probe(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: if (sc->intr_cookie) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (err); } static int ti_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "TI MMCHS (SDHCI 2.0)"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static device_method_t ti_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_sdhci_probe), DEVMETHOD(device_attach, ti_sdhci_attach), DEVMETHOD(device_detach, ti_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, ti_sdhci_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, ti_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, ti_sdhci_read_1), DEVMETHOD(sdhci_read_2, ti_sdhci_read_2), DEVMETHOD(sdhci_read_4, ti_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, ti_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, ti_sdhci_write_1), DEVMETHOD(sdhci_write_2, ti_sdhci_write_2), DEVMETHOD(sdhci_write_4, ti_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, ti_sdhci_write_multi_4), DEVMETHOD(sdhci_get_card_present, ti_sdhci_get_card_present), DEVMETHOD_END }; static driver_t ti_sdhci_driver = { "sdhci_ti", ti_sdhci_methods, sizeof(struct ti_sdhci_softc), }; DRIVER_MODULE(sdhci_ti, simplebus, ti_sdhci_driver, NULL, NULL); MODULE_DEPEND(sdhci_ti, ti_sysc, 1, 1, 1); 
SDHCI_DEPEND(sdhci_ti); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_ti); #endif diff --git a/sys/arm/ti/ti_sysc.c b/sys/arm/ti/ti_sysc.c index b00642bb87c5..f5a132e5702d 100644 --- a/sys/arm/ti/ti_sysc.c +++ b/sys/arm/ti/ti_sysc.c @@ -1,615 +1,615 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #define DEBUG_SYSC 0 #if DEBUG_SYSC #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) 
#endif /* Documentation/devicetree/bindings/bus/ti-sysc.txt * * Documentation/devicetree/clock/clock-bindings.txt * Defines phandle + optional pair * Documentation/devicetree/clock/ti-clkctl.txt */ static int ti_sysc_probe(device_t dev); static int ti_sysc_attach(device_t dev); static int ti_sysc_detach(device_t dev); #define TI_SYSC_DRA7_MCAN 15 #define TI_SYSC_USB_HOST_FS 14 #define TI_SYSC_DRA7_MCASP 13 #define TI_SYSC_MCASP 12 #define TI_SYSC_OMAP_AES 11 #define TI_SYSC_OMAP3_SHAM 10 #define TI_SYSC_OMAP4_SR 9 #define TI_SYSC_OMAP3630_SR 8 #define TI_SYSC_OMAP3430_SR 7 #define TI_SYSC_OMAP4_TIMER 6 #define TI_SYSC_OMAP2_TIMER 5 /* Above needs special workarounds */ #define TI_SYSC_OMAP4_SIMPLE 4 #define TI_SYSC_OMAP4 3 #define TI_SYSC_OMAP2 2 #define TI_SYSC 1 #define TI_SYSC_END 0 static struct ofw_compat_data compat_data[] = { { "ti,sysc-dra7-mcan", TI_SYSC_DRA7_MCAN }, { "ti,sysc-usb-host-fs", TI_SYSC_USB_HOST_FS }, { "ti,sysc-dra7-mcasp", TI_SYSC_DRA7_MCASP }, { "ti,sysc-mcasp", TI_SYSC_MCASP }, { "ti,sysc-omap-aes", TI_SYSC_OMAP_AES }, { "ti,sysc-omap3-sham", TI_SYSC_OMAP3_SHAM }, { "ti,sysc-omap4-sr", TI_SYSC_OMAP4_SR }, { "ti,sysc-omap3630-sr", TI_SYSC_OMAP3630_SR }, { "ti,sysc-omap3430-sr", TI_SYSC_OMAP3430_SR }, { "ti,sysc-omap4-timer", TI_SYSC_OMAP4_TIMER }, { "ti,sysc-omap2-timer", TI_SYSC_OMAP2_TIMER }, /* Above needs special workarounds */ { "ti,sysc-omap4-simple", TI_SYSC_OMAP4_SIMPLE }, { "ti,sysc-omap4", TI_SYSC_OMAP4 }, { "ti,sysc-omap2", TI_SYSC_OMAP2 }, { "ti,sysc", TI_SYSC }, { NULL, TI_SYSC_END } }; /* reg-names can be "rev", "sysc" and "syss" */ static const char * reg_names[] = { "rev", "sysc", "syss" }; #define REG_REV 0 #define REG_SYSC 1 #define REG_SYSS 2 #define REG_MAX 3 /* master idle / slave idle mode defined in 8.1.3.2.1 / 8.1.3.2.2 */ #include #define SYSC_IDLE_MAX 4 struct sysc_reg { uint64_t address; uint64_t size; }; struct clk_list { TAILQ_ENTRY(clk_list) next; clk_t clk; }; struct ti_sysc_softc { struct simplebus_softc sc; 
bool attach_done; device_t dev; int device_type; struct sysc_reg reg[REG_MAX]; /* Offset from host base address */ uint64_t offset_reg[REG_MAX]; uint32_t ti_sysc_mask; int32_t ti_sysc_midle[SYSC_IDLE_MAX]; int32_t ti_sysc_sidle[SYSC_IDLE_MAX]; uint32_t ti_sysc_delay_us; uint32_t ti_syss_mask; int num_clocks; TAILQ_HEAD(, clk_list) clk_list; /* deprecated ti_hwmods */ bool ti_no_reset_on_init; bool ti_no_idle_on_init; bool ti_no_idle; }; /* * All sysc seems to have a reg["rev"] register. * Lets use that for identification of which module the driver are connected to. */ uint64_t ti_sysc_get_rev_address(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->reg[REG_REV].address); } uint64_t ti_sysc_get_rev_address_offset_host(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->offset_reg[REG_REV]); } uint64_t ti_sysc_get_sysc_address(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->reg[REG_SYSC].address); } uint64_t ti_sysc_get_sysc_address_offset_host(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->offset_reg[REG_SYSC]); } uint64_t ti_sysc_get_syss_address(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->reg[REG_SYSS].address); } uint64_t ti_sysc_get_syss_address_offset_host(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); return (sc->offset_reg[REG_SYSS]); } /* * Due no memory region is assigned the sysc driver the children needs to * handle the practical read/writes to the registers. * Check if sysc has reset bit. 
*/ uint32_t ti_sysc_get_soft_reset_bit(device_t dev) { struct ti_sysc_softc *sc = device_get_softc(dev); switch (sc->device_type) { case TI_SYSC_OMAP4_TIMER: case TI_SYSC_OMAP4_SIMPLE: case TI_SYSC_OMAP4: if (sc->ti_sysc_mask & SYSC_OMAP4_SOFTRESET) { return (SYSC_OMAP4_SOFTRESET); } break; case TI_SYSC_OMAP2_TIMER: case TI_SYSC_OMAP2: case TI_SYSC: if (sc->ti_sysc_mask & SYSC_OMAP2_SOFTRESET) { return (SYSC_OMAP2_SOFTRESET); } break; default: break; } return (0); } int ti_sysc_clock_enable(device_t dev) { struct clk_list *clkp, *clkp_tmp; struct ti_sysc_softc *sc = device_get_softc(dev); int err; TAILQ_FOREACH_SAFE(clkp, &sc->clk_list, next, clkp_tmp) { err = clk_enable(clkp->clk); if (err) { DPRINTF(sc->dev, "clk_enable %s failed %d\n", clk_get_name(clkp->clk), err); break; } } return (err); } int ti_sysc_clock_disable(device_t dev) { struct clk_list *clkp, *clkp_tmp; struct ti_sysc_softc *sc = device_get_softc(dev); int err = 0; TAILQ_FOREACH_SAFE(clkp, &sc->clk_list, next, clkp_tmp) { err = clk_disable(clkp->clk); if (err) { DPRINTF(sc->dev, "clk_enable %s failed %d\n", clk_get_name(clkp->clk), err); break; } } return (err); } static int parse_regfields(struct ti_sysc_softc *sc) { phandle_t node; uint32_t parent_address_cells; uint32_t parent_size_cells; cell_t *reg; ssize_t nreg; int err, k, reg_i, prop_idx; uint32_t idx; node = ofw_bus_get_node(sc->dev); /* Get parents address and size properties */ err = OF_searchencprop(OF_parent(node), "#address-cells", &parent_address_cells, sizeof(parent_address_cells)); if (err == -1) return (ENXIO); if (!(parent_address_cells == 1 || parent_address_cells == 2)) { DPRINTF(sc->dev, "Expect parent #address-cells=[1||2]\n"); return (ENXIO); } err = OF_searchencprop(OF_parent(node), "#size-cells", &parent_size_cells, sizeof(parent_size_cells)); if (err == -1) return (ENXIO); if (!(parent_size_cells == 1 || parent_size_cells == 2)) { DPRINTF(sc->dev, "Expect parent #size-cells = [1||2]\n"); return (ENXIO); } /* Grab the 
content of reg properties */ nreg = OF_getproplen(node, "reg"); if (nreg <= 0) return (ENXIO); reg = malloc(nreg, M_DEVBUF, M_WAITOK); OF_getencprop(node, "reg", reg, nreg); /* Make sure address & size are 0 */ for (idx = 0; idx < REG_MAX; idx++) { sc->reg[idx].address = 0; sc->reg[idx].size = 0; } /* Loop through reg-names and figure out which reg-name corresponds to * index populate the values into the reg array. */ for (idx = 0, reg_i = 0; idx < REG_MAX && reg_i < nreg; idx++) { err = ofw_bus_find_string_index(node, "reg-names", reg_names[idx], &prop_idx); if (err != 0) continue; for (k = 0; k < parent_address_cells; k++) { sc->reg[prop_idx].address <<= 32; sc->reg[prop_idx].address |= reg[reg_i++]; } for (k = 0; k < parent_size_cells; k++) { sc->reg[prop_idx].size <<= 32; sc->reg[prop_idx].size |= reg[reg_i++]; } if (sc->sc.nranges == 0) sc->offset_reg[prop_idx] = sc->reg[prop_idx].address; else sc->offset_reg[prop_idx] = sc->reg[prop_idx].address - sc->sc.ranges[REG_REV].host; DPRINTF(sc->dev, "reg[%s] address %#jx size %#jx\n", reg_names[idx], sc->reg[prop_idx].address, sc->reg[prop_idx].size); } free(reg, M_DEVBUF); return (0); } static void parse_idle(struct ti_sysc_softc *sc, const char *name, uint32_t *idle) { phandle_t node; cell_t value[SYSC_IDLE_MAX]; int len, no, i; node = ofw_bus_get_node(sc->dev); if (!OF_hasprop(node, name)) { return; } len = OF_getproplen(node, name); no = len / sizeof(cell_t); if (no >= SYSC_IDLE_MAX) { DPRINTF(sc->dev, "Limit %s\n", name); no = SYSC_IDLE_MAX-1; len = no * sizeof(cell_t); } OF_getencprop(node, name, value, len); for (i = 0; i < no; i++) { idle[i] = value[i]; #if DEBUG_SYSC DPRINTF(sc->dev, "%s[%d] = %d ", name, i, value[i]); switch(value[i]) { case SYSC_IDLE_FORCE: DPRINTF(sc->dev, "SYSC_IDLE_FORCE\n"); break; case SYSC_IDLE_NO: DPRINTF(sc->dev, "SYSC_IDLE_NO\n"); break; case SYSC_IDLE_SMART: DPRINTF(sc->dev, "SYSC_IDLE_SMART\n"); break; case SYSC_IDLE_SMART_WKUP: DPRINTF(sc->dev, "SYSC_IDLE_SMART_WKUP\n"); 
			break;
		}
#endif
	}
	/* Mark any remaining (unspecified) idle-mode slots as invalid. */
	for ( ; i < SYSC_IDLE_MAX; i++)
		idle[i] = -1;
}

/*
 * Resolve every clock listed in the node's "clocks" property and queue
 * it on sc->clk_list.  All sc->num_clocks clocks must resolve; on any
 * lookup failure the temporary array is freed and nothing is added.
 *
 * Returns 0 on success, 1 on failure (not an errno; callers only test
 * for non-zero).
 */
static int
ti_sysc_attach_clocks(struct ti_sysc_softc *sc)
{
	clk_t *clk;
	struct clk_list *clkp;
	int index, err;

	clk = malloc(sc->num_clocks*sizeof(clk_t), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Check if all clocks can be found */
	for (index = 0; index < sc->num_clocks; index++) {
		err = clk_get_by_ofw_index(sc->dev, 0, index, &clk[index]);

		if (err != 0) {
			/*
			 * NOTE(review): clocks resolved in earlier iterations
			 * are not released here -- presumably retried on a
			 * later bus pass via ti_sysc_new_pass(); verify.
			 */
			free(clk, M_DEVBUF);
			return (1);
		}
	}

	/* All clocks are found, add to list */
	for (index = 0; index < sc->num_clocks; index++) {
		clkp = malloc(sizeof(*clkp), M_DEVBUF, M_WAITOK | M_ZERO);
		clkp->clk = clk[index];
		TAILQ_INSERT_TAIL(&sc->clk_list, clkp, next);
	}

	/* Release the clk array */
	free(clk, M_DEVBUF);
	return (0);
}

/*
 * Add a simplebus child for every subnode of this sysc node and try to
 * probe/attach each one.
 */
static int
ti_sysc_simplebus_attach_child(device_t dev)
{
	device_t cdev;
	phandle_t node, child;
	struct ti_sysc_softc *sc = device_get_softc(dev);

	node = ofw_bus_get_node(sc->dev);

	for (child = OF_child(node); child > 0; child = OF_peer(child)) {
		cdev = simplebus_add_device(sc->dev, child, 0, NULL, -1,
		    NULL);
		if (cdev != NULL)
			device_probe_and_attach(cdev);
	}
	return (0);
}

/* Device interface */
static int
ti_sysc_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "TI SYSC Interconnect");

	return (BUS_PROBE_DEFAULT);
}

static int
ti_sysc_attach(device_t dev)
{
	struct ti_sysc_softc *sc;
	phandle_t node;
	int err;
	cell_t value;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->device_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	node = ofw_bus_get_node(sc->dev);
	/* ranges - use simplebus */
	simplebus_init(sc->dev, node);
	if (simplebus_fill_ranges(node, &sc->sc) < 0) {
		DPRINTF(sc->dev, "could not get ranges\n");
		return (ENXIO);
	}

	if (sc->sc.nranges == 0) {
		DPRINTF(sc->dev, "nranges == 0\n");
		return (ENXIO);
	}

	/* Required field reg & reg-names - assume at least "rev" exists */
	err = parse_regfields(sc);
	if (err) {
DPRINTF(sc->dev, "parse_regfields failed %d\n", err); return (ENXIO); } /* Optional */ if (OF_hasprop(node, "ti,sysc-mask")) { OF_getencprop(node, "ti,sysc-mask", &value, sizeof(cell_t)); sc->ti_sysc_mask = value; } if (OF_hasprop(node, "ti,syss-mask")) { OF_getencprop(node, "ti,syss-mask", &value, sizeof(cell_t)); sc->ti_syss_mask = value; } if (OF_hasprop(node, "ti,sysc-delay-us")) { OF_getencprop(node, "ti,sysc-delay-us", &value, sizeof(cell_t)); sc->ti_sysc_delay_us = value; } DPRINTF(sc->dev, "sysc_mask %x syss_mask %x delay_us %x\n", sc->ti_sysc_mask, sc->ti_syss_mask, sc->ti_sysc_delay_us); parse_idle(sc, "ti,sysc-midle", sc->ti_sysc_midle); parse_idle(sc, "ti,sysc-sidle", sc->ti_sysc_sidle); if (OF_hasprop(node, "ti,no-reset-on-init")) sc->ti_no_reset_on_init = true; else sc->ti_no_reset_on_init = false; if (OF_hasprop(node, "ti,no-idle-on-init")) sc->ti_no_idle_on_init = true; else sc->ti_no_idle_on_init = false; if (OF_hasprop(node, "ti,no-idle")) sc->ti_no_idle = true; else sc->ti_no_idle = false; DPRINTF(sc->dev, "no-reset-on-init %d, no-idle-on-init %d, no-idle %d\n", sc->ti_no_reset_on_init, sc->ti_no_idle_on_init, sc->ti_no_idle); if (OF_hasprop(node, "clocks")) { struct clock_cell_info cell_info; read_clock_cells(sc->dev, &cell_info); free(cell_info.clock_cells, M_DEVBUF); free(cell_info.clock_cells_ncells, M_DEVBUF); sc->num_clocks = cell_info.num_real_clocks; TAILQ_INIT(&sc->clk_list); err = ti_sysc_attach_clocks(sc); if (err) { DPRINTF(sc->dev, "Failed to attach clocks\n"); return (bus_generic_attach(sc->dev)); } } err = ti_sysc_simplebus_attach_child(sc->dev); if (err) { DPRINTF(sc->dev, "ti_sysc_simplebus_attach_child %d\n", err); return (err); } sc->attach_done = true; return (bus_generic_attach(sc->dev)); } static int ti_sysc_detach(device_t dev) { return (EBUSY); } /* Bus interface */ static void ti_sysc_new_pass(device_t dev) { struct ti_sysc_softc *sc; int err; phandle_t node; sc = device_get_softc(dev); if (sc->attach_done) { 
bus_generic_new_pass(sc->dev); return; } node = ofw_bus_get_node(sc->dev); if (OF_hasprop(node, "clocks")) { err = ti_sysc_attach_clocks(sc); if (err) { DPRINTF(sc->dev, "Failed to attach clocks\n"); return; } } err = ti_sysc_simplebus_attach_child(sc->dev); if (err) { DPRINTF(sc->dev, "ti_sysc_simplebus_attach_child failed %d\n", err); return; } sc->attach_done = true; bus_generic_attach(sc->dev); } static device_method_t ti_sysc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_sysc_probe), DEVMETHOD(device_attach, ti_sysc_attach), DEVMETHOD(device_detach, ti_sysc_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, ti_sysc_new_pass), DEVMETHOD_END }; DEFINE_CLASS_1(ti_sysc, ti_sysc_driver, ti_sysc_methods, sizeof(struct ti_sysc_softc), simplebus_driver); EARLY_DRIVER_MODULE(ti_sysc, simplebus, ti_sysc_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); diff --git a/sys/arm64/freescale/imx/clk/imx_clk_composite.c b/sys/arm64/freescale/imx/clk/imx_clk_composite.c index 76a8979a82d4..1d5ab5908d8c 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_composite.c +++ b/sys/arm64/freescale/imx/clk/imx_clk_composite.c @@ -1,305 +1,305 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include "clkdev_if.h" #define TARGET_ROOT_ENABLE (1 << 28) #define TARGET_ROOT_MUX(n) ((n) << 24) #define TARGET_ROOT_MUX_MASK (7 << 24) #define TARGET_ROOT_MUX_SHIFT 24 #define TARGET_ROOT_PRE_PODF(n) ((((n) - 1) & 0x7) << 16) #define TARGET_ROOT_PRE_PODF_MASK (0x7 << 16) #define TARGET_ROOT_PRE_PODF_SHIFT 16 #define TARGET_ROOT_PRE_PODF_MAX 7 #define TARGET_ROOT_POST_PODF(n) ((((n) - 1) & 0x3f) << 0) #define TARGET_ROOT_POST_PODF_MASK (0x3f << 0) #define TARGET_ROOT_POST_PODF_SHIFT 0 #define TARGET_ROOT_POST_PODF_MAX 0x3f struct imx_clk_composite_sc { uint32_t offset; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define IMX_CLK_COMPOSITE_MASK_SHIFT 16 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) 
#endif static int imx_clk_composite_init(struct clknode *clk, device_t dev) { struct imx_clk_composite_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); idx = (val & TARGET_ROOT_MUX_MASK) >> TARGET_ROOT_MUX_SHIFT; clknode_init_parent_idx(clk, idx); return (0); } static int imx_clk_composite_set_gate(struct clknode *clk, bool enable) { struct imx_clk_composite_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); dprintf("%sabling gate\n", enable ? "En" : "Dis"); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= TARGET_ROOT_ENABLE; else val &= ~(TARGET_ROOT_ENABLE); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int imx_clk_composite_set_mux(struct clknode *clk, int index) { struct imx_clk_composite_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); dprintf("Set mux to %d\n", index); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~(TARGET_ROOT_MUX_MASK); val |= TARGET_ROOT_MUX(index); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int imx_clk_composite_recalc(struct clknode *clk, uint64_t *freq) { struct imx_clk_composite_sc *sc; uint32_t reg, pre_div, post_div; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); pre_div = ((reg & TARGET_ROOT_PRE_PODF_MASK) >> TARGET_ROOT_PRE_PODF_SHIFT) + 1; post_div = ((reg & TARGET_ROOT_POST_PODF_MASK) >> TARGET_ROOT_POST_PODF_SHIFT) + 1; dprintf("parent_freq=%ju, div=%u\n", *freq, div); *freq = *freq / pre_div / post_div; dprintf("Final freq=%ju\n", *freq); return (0); } static int imx_clk_composite_find_best(uint64_t fparent, uint64_t ftarget, uint32_t *pre_div, uint32_t *post_div, int flags) { uint32_t prediv, postdiv, best_prediv, best_postdiv; int64_t diff, best_diff; uint64_t cur; best_diff = INT64_MAX; for (prediv = 1; prediv <= TARGET_ROOT_PRE_PODF_MAX + 1; prediv++) { for (postdiv = 1; postdiv <= TARGET_ROOT_POST_PODF_MAX + 1; 
postdiv++) { cur= fparent / prediv / postdiv; diff = (int64_t)ftarget - (int64_t)cur; if (flags & CLK_SET_ROUND_DOWN) { if (diff >= 0 && diff < best_diff) { best_diff = diff; best_prediv = prediv; best_postdiv = postdiv; } } else if (flags & CLK_SET_ROUND_UP) { if (diff <= 0 && abs(diff) < best_diff) { best_diff = diff; best_prediv = prediv; best_postdiv = postdiv; } } else { if (abs(diff) < best_diff) { best_diff = abs(diff); best_prediv = prediv; best_postdiv = postdiv; } } } } if (best_diff == INT64_MAX) return (ERANGE); *pre_div = best_prediv; *post_div = best_postdiv; return (0); } static int imx_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct imx_clk_composite_sc *sc; struct clknode *p_clk; const char **p_names; int p_idx, best_parent; int64_t best_diff, diff; int32_t best_pre_div __unused, best_post_div __unused; int32_t pre_div, post_div; uint64_t cur, best; uint32_t val; sc = clknode_get_softc(clk); dprintf("Finding best parent/div for target freq of %ju\n", *fout); p_names = clknode_get_parent_names(clk); best_diff = 0; for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) { p_clk = clknode_find_by_name(p_names[p_idx]); clknode_get_freq(p_clk, &fparent); dprintf("Testing with parent %s (%d) at freq %ju\n", clknode_get_name(p_clk), p_idx, fparent); if (!imx_clk_composite_find_best(fparent, *fout, &pre_div, &post_div, sc->flags)) continue; cur = fparent / pre_div / post_div; diff = abs((int64_t)*fout - (int64_t)cur); if (diff < best_diff) { best = cur; best_diff = diff; best_pre_div = pre_div; best_post_div = post_div; best_parent = p_idx; dprintf("Best parent so far %s (%d) with best freq at " "%ju\n", clknode_get_name(p_clk), p_idx, best); } } *stop = 1; if (best_diff == INT64_MAX) return (ERANGE); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; return (0); } p_idx = clknode_get_parent_idx(clk); if (p_idx != best_parent) { dprintf("Switching parent index from %d to %d\n", p_idx, 
best_parent); clknode_set_parent_by_idx(clk, best_parent); } dprintf("Setting dividers to pre=%d, post=%d\n", best_pre_div, best_post_div); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~(TARGET_ROOT_PRE_PODF_MASK | TARGET_ROOT_POST_PODF_MASK); val |= TARGET_ROOT_PRE_PODF(pre_div); val |= TARGET_ROOT_POST_PODF(post_div); DEVICE_UNLOCK(clk); *fout = best; return (0); } static clknode_method_t imx_clk_composite_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, imx_clk_composite_init), CLKNODEMETHOD(clknode_set_gate, imx_clk_composite_set_gate), CLKNODEMETHOD(clknode_set_mux, imx_clk_composite_set_mux), CLKNODEMETHOD(clknode_recalc_freq, imx_clk_composite_recalc), CLKNODEMETHOD(clknode_set_freq, imx_clk_composite_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(imx_clk_composite_clknode, imx_clk_composite_clknode_class, imx_clk_composite_clknode_methods, sizeof(struct imx_clk_composite_sc), clknode_class); int imx_clk_composite_register(struct clkdom *clkdom, struct imx_clk_composite_def *clkdef) { struct clknode *clk; struct imx_clk_composite_sc *sc; clk = clknode_create(clkdom, &imx_clk_composite_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm64/freescale/imx/clk/imx_clk_composite.h b/sys/arm64/freescale/imx/clk/imx_clk_composite.h index 84c7aea519ce..6c6d553b1e98 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_composite.h +++ b/sys/arm64/freescale/imx/clk/imx_clk_composite.h @@ -1,43 +1,43 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _IMX_CLK_COMPOSITE_H_ #define _IMX_CLK_COMPOSITE_H_ -#include +#include struct imx_clk_composite_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t flags; }; int imx_clk_composite_register(struct clkdom *clkdom, struct imx_clk_composite_def *clkdef); #endif /* _IMX_CLK_COMPOSITE_H_ */ diff --git a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c index 277c06e7badd..daa69d152af4 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c +++ b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.c @@ -1,172 +1,172 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include "clkdev_if.h" struct imx_clk_frac_pll_sc { uint32_t offset; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define CFG0 0 #define CFG0_PLL_LOCK (1 << 31) #define CFG0_PD (1 << 19) #define CFG0_BYPASS (1 << 14) #define CFG0_NEWDIV_VAL (1 << 12) #define CFG0_NEWDIV_ACK (1 << 11) #define CFG0_OUTPUT_DIV_MASK (0x1f << 0) #define CFG0_OUTPUT_DIV_SHIFT 0 #define CFG1 4 #define CFG1_FRAC_DIV_MASK (0xffffff << 7) #define CFG1_FRAC_DIV_SHIFT 7 #define CFG1_INT_DIV_MASK (0x7f << 0) #define CFG1_INT_DIV_SHIFT 0 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) 
#endif static int imx_clk_frac_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int imx_clk_frac_pll_set_gate(struct clknode *clk, bool enable) { struct imx_clk_frac_pll_sc *sc; uint32_t cfg0; int timeout; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset + CFG0, &cfg0); if (enable) cfg0 &= ~(CFG0_PD); else cfg0 |= CFG0_PD; WRITE4(clk, sc->offset + CFG0, cfg0); /* Wait for PLL to lock */ if (enable && ((cfg0 & CFG0_BYPASS) == 0)) { for (timeout = 1000; timeout; timeout--) { READ4(clk, sc->offset + CFG0, &cfg0); if (cfg0 & CFG0_PLL_LOCK) break; DELAY(1); } } DEVICE_UNLOCK(clk); return (0); } static int imx_clk_frac_pll_recalc(struct clknode *clk, uint64_t *freq) { struct imx_clk_frac_pll_sc *sc; uint32_t cfg0, cfg1; uint64_t div, divfi, divff, divf_val; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset + CFG0, &cfg0); READ4(clk, sc->offset + CFG1, &cfg1); DEVICE_UNLOCK(clk); div = (cfg0 & CFG0_OUTPUT_DIV_MASK) >> CFG0_OUTPUT_DIV_SHIFT; div = (div + 1) * 2; divff = (cfg1 & CFG1_FRAC_DIV_MASK) >> CFG1_FRAC_DIV_SHIFT; divfi = (cfg1 & CFG1_INT_DIV_MASK) >> CFG1_INT_DIV_SHIFT; /* PLL is bypassed */ if (cfg0 & CFG0_BYPASS) return (0); divf_val = 1 + divfi + (divff/0x1000000); *freq = *freq * 8 * divf_val / div; return (0); } static clknode_method_t imx_clk_frac_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, imx_clk_frac_pll_init), CLKNODEMETHOD(clknode_set_gate, imx_clk_frac_pll_set_gate), CLKNODEMETHOD(clknode_recalc_freq, imx_clk_frac_pll_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(imx_clk_frac_pll_clknode, imx_clk_frac_pll_clknode_class, imx_clk_frac_pll_clknode_methods, sizeof(struct imx_clk_frac_pll_sc), clknode_class); int imx_clk_frac_pll_register(struct clkdom *clkdom, struct imx_clk_frac_pll_def *clkdef) { struct clknode *clk; struct imx_clk_frac_pll_sc *sc; clk = clknode_create(clkdom, &imx_clk_frac_pll_clknode_class, &clkdef->clkdef); if (clk 
== NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h index 17bb75f129ac..157b9638b1fd 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h +++ b/sys/arm64/freescale/imx/clk/imx_clk_frac_pll.h @@ -1,40 +1,40 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _IMX_CLK_FRAC_PLL_H_ #define _IMX_CLK_FRAC_PLL_H_ -#include +#include struct imx_clk_frac_pll_def { struct clknode_init_def clkdef; uint32_t offset; }; int imx_clk_frac_pll_register(struct clkdom *clkdom, struct imx_clk_frac_pll_def *clkdef); #endif /* _IMX_CLK_FRAC_PLL_H_ */ diff --git a/sys/arm64/freescale/imx/clk/imx_clk_gate.c b/sys/arm64/freescale/imx/clk/imx_clk_gate.c index e666c063b29d..57d85291e0c2 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_gate.c +++ b/sys/arm64/freescale/imx/clk/imx_clk_gate.c @@ -1,114 +1,114 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int imx_clk_gate_init(struct clknode *clk, device_t dev); static int imx_clk_gate_set_gate(struct clknode *clk, bool enable); struct imx_clk_gate_sc { uint32_t offset; uint32_t shift; uint32_t mask; int gate_flags; }; static clknode_method_t imx_clk_gate_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, imx_clk_gate_init), CLKNODEMETHOD(clknode_set_gate, imx_clk_gate_set_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(imx_clk_gate, imx_clk_gate_class, imx_clk_gate_methods, sizeof(struct imx_clk_gate_sc), clknode_class); static int imx_clk_gate_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return(0); } static int imx_clk_gate_set_gate(struct clknode *clk, bool enable) { uint32_t reg; struct imx_clk_gate_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, sc->mask << sc->shift, (enable ? 
sc->mask : 0) << sc->shift); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); return(0); } int imx_clk_gate_register(struct clkdom *clkdom, struct imx_clk_gate_def *clkdef) { struct clknode *clk; struct imx_clk_gate_sc *sc; clk = clknode_create(clkdom, &imx_clk_gate_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = clkdef->mask; sc->gate_flags = clkdef->gate_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm64/freescale/imx/clk/imx_clk_gate.h b/sys/arm64/freescale/imx/clk/imx_clk_gate.h index c018466a585d..9fb7af24b282 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_gate.h +++ b/sys/arm64/freescale/imx/clk/imx_clk_gate.h @@ -1,43 +1,43 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _IMX_CLK_GATE_H_ #define _IMX_CLK_GATE_H_ -#include +#include struct imx_clk_gate_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t mask; int gate_flags; }; int imx_clk_gate_register(struct clkdom *clkdom, struct imx_clk_gate_def *clkdef); #endif /* _IMX_CLK_GATE_H_ */ diff --git a/sys/arm64/freescale/imx/clk/imx_clk_mux.c b/sys/arm64/freescale/imx/clk/imx_clk_mux.c index 41f1bdb3bef1..ec84de9c3534 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_mux.c +++ b/sys/arm64/freescale/imx/clk/imx_clk_mux.c @@ -1,133 +1,133 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int imx_clk_mux_init(struct clknode *clk, device_t dev); static int imx_clk_mux_set_mux(struct clknode *clk, int idx); struct imx_clk_mux_sc { uint32_t offset; uint32_t shift; uint32_t mask; int mux_flags; }; static clknode_method_t imx_clk_mux_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, imx_clk_mux_init), CLKNODEMETHOD(clknode_set_mux, imx_clk_mux_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(imx_clk_mux, imx_clk_mux_class, imx_clk_mux_methods, sizeof(struct imx_clk_mux_sc), clknode_class); static int imx_clk_mux_init(struct clknode *clk, device_t dev) { uint32_t reg; struct imx_clk_mux_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); if (rv != 0) { return (rv); } reg = (reg >> sc->shift) & sc->mask; clknode_init_parent_idx(clk, reg); return(0); } static int imx_clk_mux_set_mux(struct clknode *clk, int idx) { uint32_t reg; struct imx_clk_mux_sc *sc; int rv; sc = 
clknode_get_softc(clk); DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, sc->mask << sc->shift, ((idx & sc->mask) << sc->shift)); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); return(0); } int imx_clk_mux_register(struct clkdom *clkdom, struct imx_clk_mux_def *clkdef) { struct clknode *clk; struct imx_clk_mux_sc *sc; clk = clknode_create(clkdom, &imx_clk_mux_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = (1 << clkdef->width) - 1; sc->mux_flags = clkdef->mux_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm64/freescale/imx/clk/imx_clk_mux.h b/sys/arm64/freescale/imx/clk/imx_clk_mux.h index 3955e352c5e7..a735ff223037 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_mux.h +++ b/sys/arm64/freescale/imx/clk/imx_clk_mux.h @@ -1,43 +1,43 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _IMX_CLK_MUX_H_ #define _IMX_CLK_MUX_H_ -#include +#include struct imx_clk_mux_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t width; int mux_flags; }; int imx_clk_mux_register(struct clkdom *clkdom, struct imx_clk_mux_def *clkdef); #endif /* _IMX_CLK_MUX_H_ */ diff --git a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c index ae6395de707a..0f80be5e06c0 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c +++ b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.c @@ -1,187 +1,187 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include "clkdev_if.h" struct imx_clk_sscg_pll_sc { uint32_t offset; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define CFG0 0x00 #define CFG0_PLL_LOCK (1 << 31) #define CFG0_PD (1 << 7) #define CFG0_BYPASS2 (1 << 5) #define CFG0_BYPASS1 (1 << 4) #define CFG1 0x04 #define CFG2 0x08 #define CFG2_DIVR1_MASK (7 << 25) #define CFG2_DIVR1_SHIFT 25 #define CFG2_DIVR2_MASK (0x3f << 19) #define CFG2_DIVR2_SHIFT 19 #define CFG2_DIVF1_MASK (0x3f << 13) #define CFG2_DIVF1_SHIFT 13 #define CFG2_DIVF2_MASK (0x3f << 7) #define CFG2_DIVF2_SHIFT 7 #define CFG2_DIV_MASK (0x3f << 1) #define CFG2_DIV_SHIFT 1 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) 
#endif static int imx_clk_sscg_pll_init(struct clknode *clk, device_t dev) { if (clknode_get_parents_num(clk) > 1) { device_printf(clknode_get_device(clk), "error: SSCG PLL does not support more than one parent yet\n"); return (EINVAL); } clknode_init_parent_idx(clk, 0); return (0); } static int imx_clk_sscg_pll_set_gate(struct clknode *clk, bool enable) { struct imx_clk_sscg_pll_sc *sc; uint32_t cfg0; int timeout; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset + CFG0, &cfg0); if (enable) cfg0 &= ~(CFG0_PD); else cfg0 |= CFG0_PD; WRITE4(clk, sc->offset + CFG0, cfg0); /* Reading lock */ if (enable) { for (timeout = 1000; timeout; timeout--) { READ4(clk, sc->offset + CFG0, &cfg0); if (cfg0 & CFG0_PLL_LOCK) break; DELAY(1); } } DEVICE_UNLOCK(clk); return (0); } static int imx_clk_sscg_pll_recalc(struct clknode *clk, uint64_t *freq) { struct imx_clk_sscg_pll_sc *sc; uint32_t cfg0, cfg2; int divr1, divr2, divf1, divf2, div; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset + CFG0, &cfg0); READ4(clk, sc->offset + CFG2, &cfg2); DEVICE_UNLOCK(clk); /* PLL is bypassed */ if (cfg0 & CFG0_BYPASS2) return (0); divr1 = (cfg2 & CFG2_DIVR1_MASK) >> CFG2_DIVR1_SHIFT; divr2 = (cfg2 & CFG2_DIVR2_MASK) >> CFG2_DIVR2_SHIFT; divf1 = (cfg2 & CFG2_DIVF1_MASK) >> CFG2_DIVF1_SHIFT; divf2 = (cfg2 & CFG2_DIVF2_MASK) >> CFG2_DIVF2_SHIFT; div = (cfg2 & CFG2_DIV_MASK) >> CFG2_DIV_SHIFT; if (cfg0 & CFG0_BYPASS1) { *freq = *freq / ((divr2 + 1) * (div + 1)); return (0); } *freq *= 2 * (divf1 + 1) * (divf2 + 1); *freq /= (divr1 + 1) * (divr2 + 1) * (div + 1); return (0); } static clknode_method_t imx_clk_sscg_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, imx_clk_sscg_pll_init), CLKNODEMETHOD(clknode_set_gate, imx_clk_sscg_pll_set_gate), CLKNODEMETHOD(clknode_recalc_freq, imx_clk_sscg_pll_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(imx_clk_sscg_pll_clknode, imx_clk_sscg_pll_clknode_class, imx_clk_sscg_pll_clknode_methods, 
sizeof(struct imx_clk_sscg_pll_sc), clknode_class); int imx_clk_sscg_pll_register(struct clkdom *clkdom, struct imx_clk_sscg_pll_def *clkdef) { struct clknode *clk; struct imx_clk_sscg_pll_sc *sc; clk = clknode_create(clkdom, &imx_clk_sscg_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; clknode_register(clkdom, clk); return (0); } diff --git a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h index 16f0016a1d66..346606265e63 100644 --- a/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h +++ b/sys/arm64/freescale/imx/clk/imx_clk_sscg_pll.h @@ -1,40 +1,40 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _IMX_CLK_SSCG_PLL_H_ #define _IMX_CLK_SSCG_PLL_H_ -#include +#include struct imx_clk_sscg_pll_def { struct clknode_init_def clkdef; uint32_t offset; }; int imx_clk_sscg_pll_register(struct clkdom *clkdom, struct imx_clk_sscg_pll_def *clkdef); #endif /* _IMX_CLK_SSCG_PLL_H_ */ diff --git a/sys/arm64/freescale/imx/imx_ccm_clk.h b/sys/arm64/freescale/imx/imx_ccm_clk.h index 376815bc1663..4c16fa00fe6b 100644 --- a/sys/arm64/freescale/imx/imx_ccm_clk.h +++ b/sys/arm64/freescale/imx/imx_ccm_clk.h @@ -1,210 +1,210 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef IMX6_CCM_CLK_H #define IMX6_CCM_CLK_H -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include enum imx_clk_type { IMX_CLK_UNDEFINED = 0, IMX_CLK_FIXED, IMX_CLK_LINK, IMX_CLK_MUX, IMX_CLK_GATE, IMX_CLK_COMPOSITE, IMX_CLK_SSCG_PLL, IMX_CLK_FRAC_PLL, IMX_CLK_DIV, }; struct imx_clk { enum imx_clk_type type; union { struct clk_fixed_def *fixed; struct clk_link_def *link; struct imx_clk_mux_def *mux; struct imx_clk_gate_def *gate; struct imx_clk_composite_def *composite; struct imx_clk_sscg_pll_def *sscg_pll; struct imx_clk_frac_pll_def *frac_pll; struct clk_div_def *div; } clk; }; /* Linked clock. */ #define LINK(_id, _name) \ { \ .type = IMX_CLK_LINK, \ .clk.link = &(struct clk_link_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ }, \ } /* Complex clock without divider (multiplexer only). 
*/ #define MUX(_id, _name, _pn, _f, _mo, _ms, _mw) \ { \ .type = IMX_CLK_MUX, \ .clk.mux = &(struct imx_clk_mux_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _mo, \ .shift = _ms, \ .width = _mw, \ .mux_flags = _f, \ }, \ } /* Fixed frequency clock */ #define FIXED(_id, _name, _freq) \ { \ .type = IMX_CLK_FIXED, \ .clk.fixed = &(struct clk_fixed_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .freq = _freq, \ }, \ } /* Fixed factor multipier/divider. */ #define FFACT(_id, _name, _pname, _mult, _div) \ { \ .type = IMX_CLK_FIXED, \ .clk.fixed = &(struct clk_fixed_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .mult = _mult, \ .div = _div, \ }, \ } /* Clock gate */ #define GATE(_id, _name, _pname, _o, _shift) \ { \ .type = IMX_CLK_GATE, \ .clk.gate = &(struct imx_clk_gate_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _o, \ .shift = _shift, \ .mask = 1, \ }, \ } /* Root clock gate */ #define ROOT_GATE(_id, _name, _pname, _reg) \ { \ .type = IMX_CLK_GATE, \ .clk.gate = &(struct imx_clk_gate_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _reg, \ .shift = 0, \ .mask = 3, \ }, \ } /* Composite clock with GATE, MUX, PRE_DIV, and POST_DIV */ #define COMPOSITE(_id, _name, _pn, _o, _flags) \ { \ .type = IMX_CLK_COMPOSITE, \ .clk.composite = &(struct imx_clk_composite_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = 
CLK_NODE_STATIC_STRINGS, \ .offset = _o, \ .flags = _flags, \ }, \ } /* SSCG PLL */ #define SSCG_PLL(_id, _name, _pn, _o) \ { \ .type = IMX_CLK_SSCG_PLL, \ .clk.composite = &(struct imx_clk_composite_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _o, \ }, \ } /* Fractional PLL */ #define FRAC_PLL(_id, _name, _pname, _o) \ { \ .type = IMX_CLK_FRAC_PLL, \ .clk.frac_pll = &(struct imx_clk_frac_pll_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _o, \ }, \ } #define DIV(_id, _name, _pname, _o, _shift, _width) \ { \ .type = IMX_CLK_DIV, \ .clk.div = &(struct clk_div_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _o, \ .i_shift = _shift, \ .i_width = _width, \ }, \ } #endif diff --git a/sys/arm64/nvidia/tegra210/tegra210_car.c b/sys/arm64/nvidia/tegra210/tegra210_car.c index 0562b91c5a97..2046782805d6 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_car.c +++ b/sys/arm64/nvidia/tegra210/tegra210_car.c @@ -1,597 +1,597 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include #include "clkdev_if.h" #include "hwreset_if.h" #include "tegra210_car.h" static struct ofw_compat_data compat_data[] = { {"nvidia,tegra210-car", 1}, {NULL, 0}, }; #define PLIST(x) static const char *x[] /* Pure multiplexer. */ #define MUX(_id, cname, plists, o, s, w) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = plists, \ .clkdef.parent_cnt = nitems(plists), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .width = w, \ } /* Fractional divider (7.1). */ #define DIV7_1(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = (s) + 1, \ .i_width = 7, \ .f_shift = s, \ .f_width = 1, \ } /* Integer divider. 
*/ #define DIV(_id, cname, plist, o, s, w, f) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = s, \ .i_width = w, \ .div_flags = f, \ } /* Gate in PLL block. */ #define GATE_PLL(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 3, \ .on_value = 3, \ .off_value = 0, \ } /* Standard gate. */ #define GATE(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 1, \ .on_value = 1, \ .off_value = 0, \ } /* Inverted gate. */ #define GATE_INV(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 1, \ .on_value = 0, \ .off_value = 1, \ } /* Fixed rate clock. */ #define FRATE(_id, cname, _freq) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .freq = _freq, \ } /* Fixed rate multipier/divider. */ #define FACT(_id, cname, pname, _mult, _div) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .mult = _mult, \ .div = _div, \ } static uint32_t osc_freqs[16] = { [0] = 13000000, [1] = 16800000, [4] = 19200000, [5] = 38400000, [8] = 12000000, [9] = 48000000, }; /* Parent lists. 
*/ PLIST(mux_xusb_hs) = {"xusb_ss_div2", "pllU_60", "pc_xusb_ss" }; PLIST(mux_xusb_ssp) = {"xusb_ss", "osc_div_clk"}; /* Clocks adjusted online. */ static struct clk_fixed_def fixed_osc = FRATE(TEGRA210_CLK_CLK_M, "osc", 38400000); static struct clk_fixed_def fixed_clk_m = FACT(0, "clk_m", "osc", 1, 1); static struct clk_fixed_def fixed_osc_div = FACT(0, "osc_div_clk", "osc", 1, 1); static struct clk_fixed_def tegra210_fixed_clks[] = { /* Core clocks. */ FRATE(0, "bogus", 1), FRATE(0, "clk_s", 32768), /* Audio clocks. */ FRATE(0, "vimclk_sync", 1), FRATE(0, "i2s1_sync", 1), FRATE(0, "i2s2_sync", 1), FRATE(0, "i2s3_sync", 1), FRATE(0, "i2s4_sync", 1), FRATE(0, "i2s5_sync", 1), FRATE(0, "spdif_in_sync", 1), /* XUSB */ FACT(TEGRA210_CLK_XUSB_SS_DIV2, "xusb_ss_div2", "xusb_ss", 1, 2), /* SOR */ FACT(0, "sor_safe_div", "pllP_out0", 1, 17), FACT(0, "dpaux_div", "sor_safe", 1, 17), FACT(0, "dpaux1_div", "sor_safe", 1, 17), /* Not Yet Implemented */ FRATE(0, "audio", 10000000), FRATE(0, "audio0", 10000000), FRATE(0, "audio1", 10000000), FRATE(0, "audio2", 10000000), FRATE(0, "audio3", 10000000), FRATE(0, "audio4", 10000000), FRATE(0, "ext_vimclk", 10000000), FRATE(0, "audiod1", 10000000), FRATE(0, "audiod2", 10000000), FRATE(0, "audiod3", 10000000), FRATE(0, "dfllCPU_out", 10000000), }; static struct clk_mux_def tegra210_mux_clks[] = { /* USB. */ MUX(TEGRA210_CLK_XUSB_HS_SRC, "xusb_hs", mux_xusb_hs, CLK_SOURCE_XUSB_SS, 25, 2), MUX(0, "xusb_ssp", mux_xusb_ssp, CLK_SOURCE_XUSB_SS, 24, 1), }; static struct clk_gate_def tegra210_gate_clks[] = { /* Base peripheral clocks. 
*/ GATE_INV(TEGRA210_CLK_HCLK, "hclk", "hclk_div", CLK_SYSTEM_RATE, 7), GATE_INV(TEGRA210_CLK_PCLK, "pclk", "pclk_div", CLK_SYSTEM_RATE, 3), GATE(TEGRA210_CLK_CML0, "cml0", "pllE_out0", PLLE_AUX, 0), GATE(TEGRA210_CLK_CML1, "cml1", "pllE_out0", PLLE_AUX, 1), GATE(0, "pllD_dsi_csi", "pllD_out0", PLLD_MISC, 21), GATE(0, "pllP_hsio", "pllP_out0", PLLP_MISC1, 29), GATE(0, "pllP_xusb", "pllP_hsio", PLLP_MISC1, 28), }; static struct clk_div_def tegra210_div_clks[] = { /* Base peripheral clocks. */ DIV(0, "hclk_div", "sclk", CLK_SYSTEM_RATE, 4, 2, 0), DIV(0, "pclk_div", "hclk", CLK_SYSTEM_RATE, 0, 2, 0), }; /* Initial setup table. */ static struct tegra210_init_item clk_init_table[] = { /* clock, partent, frequency, enable */ {"uarta", "pllP_out0", 408000000, 0}, {"uartb", "pllP_out0", 408000000, 0}, {"uartc", "pllP_out0", 408000000, 0}, {"uartd", "pllP_out0", 408000000, 0}, {"pllA", NULL, 564480000, 1}, {"pllA_out0", NULL, 11289600, 1}, {"extperiph1", "pllA_out0", 0, 1}, {"i2s1", "pllA_out0", 11289600, 0}, {"i2s2", "pllA_out0", 11289600, 0}, {"i2s3", "pllA_out0", 11289600, 0}, {"i2s4", "pllA_out0", 11289600, 0}, {"i2s5", "pllA_out0", 11289600, 0}, {"host1x", "pllP_out0", 136000000, 1}, {"sclk", "pllP_out2", 102000000, 1}, {"dvfs_soc", "pllP_out0", 51000000, 1}, {"dvfs_ref", "pllP_out0", 51000000, 1}, {"spi4", "pllP_out0", 12000000, 1}, {"pllREFE", NULL, 672000000, 0}, {"xusb", NULL, 0, 1}, {"xusb_ss", "pllU_480", 120000000, 0}, {"pc_xusb_fs", "pllU_48", 48000000, 0}, {"xusb_hs", "pc_xusb_ss", 120000000, 0}, {"xusb_ssp", "xusb_ss", 120000000, 0}, {"pc_xusb_falcon", "pllP_xusb", 204000000, 0}, {"pc_xusb_core_host", "pllP_xusb", 102000000, 0}, {"pc_xusb_core_dev", "pllP_xusb", 102000000, 0}, {"sata", "pllP_out0", 104000000, 0}, {"sata_oob", "pllP_out0", 204000000, 0}, {"emc", NULL, 0, 1}, {"mselect", NULL, 0, 1}, {"csite", NULL, 0, 1}, {"dbgapb", NULL, 0, 1 }, {"tsensor", "clk_m", 400000, 0}, {"i2c1", "pllP_out0", 0, 0}, {"i2c2", "pllP_out0", 0, 0}, {"i2c3", "pllP_out0", 0, 
	0},
	{"i2c4", "pllP_out0", 0, 0},
	{"i2c5", "pllP_out0", 0, 0},
	{"i2c6", "pllP_out0", 0, 0},
	{"pllDP_out0", NULL, 270000000, 0},
	{"soc_therm", "pllP_out0", 51000000, 0},
	{"cclk_g", NULL, 0, 1},
	{"pllU_out1", NULL, 48000000, 1},
	{"pllU_out2", NULL, 60000000, 1},
	{"pllC4", NULL, 1000000000, 1},
	{"pllC4_out0", NULL, 1000000000, 1},
};

/*
 * Register every integer-divider clock node from 'clks' into the clock
 * domain.  Registration failure is fatal: the rest of the clock tree
 * cannot be built without these nodes.
 */
static void
init_divs(struct tegra210_car_softc *sc, struct clk_div_def *clks, int nclks)
{
	int i, rv;

	for (i = 0; i < nclks; i++) {
		rv = clknode_div_register(sc->clkdom, clks + i);
		if (rv != 0)
			panic("clk_div_register failed");
	}
}

/* Register every gate clock node from 'clks'; failure is fatal. */
static void
init_gates(struct tegra210_car_softc *sc, struct clk_gate_def *clks, int nclks)
{
	int i, rv;

	for (i = 0; i < nclks; i++) {
		rv = clknode_gate_register(sc->clkdom, clks + i);
		if (rv != 0)
			panic("clk_gate_register failed");
	}
}

/* Register every multiplexer clock node from 'clks'; failure is fatal. */
static void
init_muxes(struct tegra210_car_softc *sc, struct clk_mux_def *clks, int nclks)
{
	int i, rv;

	for (i = 0; i < nclks; i++) {
		rv = clknode_mux_register(sc->clkdom, clks + i);
		if (rv != 0)
			panic("clk_mux_register failed");
	}
}

/*
 * Register the fixed-rate/fixed-factor clock nodes.  The root oscillator
 * frequency and the pll-ref / clk_m dividers are not compile-time
 * constants: they are read from the OSC_CTRL and SPARE_REG0 registers
 * and patched into fixed_osc, fixed_osc_div and fixed_clk_m before those
 * are registered, followed by the static 'clks' table.
 */
static void
init_fixeds(struct tegra210_car_softc *sc, struct clk_fixed_def *clks,
    int nclks)
{
	int i, rv;
	uint32_t val;
	int osc_idx;

	CLKDEV_READ_4(sc->dev, OSC_CTRL, &val);
	osc_idx = OSC_CTRL_OSC_FREQ_GET(val);
	/* osc_freqs[] is sparse; a zero entry means an unsupported strap. */
	fixed_osc.freq = osc_freqs[osc_idx];
	if (fixed_osc.freq == 0)
		panic("Undefined input frequency");
	rv = clknode_fixed_register(sc->clkdom, &fixed_osc);
	if (rv != 0)
		panic("clk_fixed_register failed");

	fixed_osc_div.div = 1 << OSC_CTRL_PLL_REF_DIV_GET(val);
	rv = clknode_fixed_register(sc->clkdom, &fixed_osc_div);
	if (rv != 0)
		panic("clk_fixed_register failed");

	CLKDEV_READ_4(sc->dev, SPARE_REG0, &val);
	fixed_clk_m.div = SPARE_REG0_MDIV_GET(val) + 1;
	rv = clknode_fixed_register(sc->clkdom, &fixed_clk_m);
	if (rv != 0)
		panic("clk_fixed_register failed");

	for (i = 0; i < nclks; i++) {
		rv = clknode_fixed_register(sc->clkdom, clks + i);
		if (rv != 0)
			panic("clk_fixed_register failed");
	}
}

/*
 * Apply clk_init_table: reparent, set rate and enable the listed clocks.
 * Called with the clock domain topology lock held (see register_clocks()).
 */
static void
postinit_clock(struct tegra210_car_softc *sc)
{
	int i;
	struct tegra210_init_item *tbl;
	struct clknode *clknode;
	int rv;

	/*
	 * Per-entry failures are reported but non-fatal; the loop simply
	 * moves on to the next table entry.
	 */
	for (i = 0; i < nitems(clk_init_table); i++) {
		tbl = &clk_init_table[i];

		clknode = clknode_find_by_name(tbl->name);
		if (clknode == NULL) {
			device_printf(sc->dev, "Cannot find clock %s\n",
			    tbl->name);
			continue;
		}
		/* NULL parent means "keep the hardware/default parent". */
		if (tbl->parent != NULL) {
			rv = clknode_set_parent_by_name(clknode, tbl->parent);
			if (rv != 0) {
				device_printf(sc->dev,
				    "Cannot set parent for %s (to %s): %d\n",
				    tbl->name, tbl->parent, rv);
				continue;
			}
		}
		/* Zero frequency means "leave the rate alone". */
		if (tbl->frequency != 0) {
			rv = clknode_set_freq(clknode, tbl->frequency, 0, 9999);
			if (rv != 0) {
				device_printf(sc->dev,
				    "Cannot set frequency for %s: %d\n",
				    tbl->name, rv);
				continue;
			}
		}
		if (tbl->enable != 0) {
			rv = clknode_enable(clknode);
			if (rv != 0) {
				device_printf(sc->dev,
				    "Cannot enable %s: %d\n",
				    tbl->name, rv);
				continue;
			}
		}
	}
}

/*
 * Create the clock domain and register all node types (fixed, PLL, mux,
 * divider, gate, peripheral, super-mux), then finalize the domain and
 * apply the initial configuration table under the topology lock.
 */
static void
register_clocks(device_t dev)
{
	struct tegra210_car_softc *sc;

	sc = device_get_softc(dev);
	sc->clkdom = clkdom_create(dev);
	if (sc->clkdom == NULL)
		panic("clkdom == NULL");
	init_fixeds(sc, tegra210_fixed_clks, nitems(tegra210_fixed_clks));
	tegra210_init_plls(sc);
	init_muxes(sc, tegra210_mux_clks, nitems(tegra210_mux_clks));
	init_divs(sc, tegra210_div_clks, nitems(tegra210_div_clks));
	init_gates(sc, tegra210_gate_clks, nitems(tegra210_gate_clks));
	tegra210_periph_clock(sc);
	tegra210_super_mux_clock(sc);
	clkdom_finit(sc->clkdom);
	clkdom_xlock(sc->clkdom);
	postinit_clock(sc);
	clkdom_unlock(sc->clkdom);
	if (bootverbose)
		clkdom_dump(sc->clkdom);
}

/* CLKDEV accessors: 32-bit reads/writes of the CAR register bank. */
static int
tegra210_car_clkdev_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
{
	struct tegra210_car_softc *sc;

	sc = device_get_softc(dev);
	*val = bus_read_4(sc->mem_res, addr);
	return (0);
}

static int
tegra210_car_clkdev_write_4(device_t dev, bus_addr_t addr, uint32_t val)
{
	struct tegra210_car_softc *sc;

	sc = device_get_softc(dev);
	bus_write_4(sc->mem_res, addr, val);
	return (0);
}

static int
tegra210_car_clkdev_modify_4(device_t dev, bus_addr_t addr,
    uint32_t clear_mask, uint32_t set_mask)
{
	struct tegra210_car_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	/* Read-modify-write; serialization is provided by the device lock. */
	reg = bus_read_4(sc->mem_res, addr);
	reg &= ~clear_mask;
	reg |= set_mask;
	bus_write_4(sc->mem_res, addr, reg);
	return (0);
}

/* CLKDEV lock methods: guard register access with the softc mutex. */
static void
tegra210_car_clkdev_device_lock(device_t dev)
{
	struct tegra210_car_softc *sc;

	sc = device_get_softc(dev);
	mtx_lock(&sc->mtx);
}

static void
tegra210_car_clkdev_device_unlock(device_t dev)
{
	struct tegra210_car_softc *sc;

	sc = device_get_softc(dev);
	mtx_unlock(&sc->mtx);
}

/* The clock driver must stay resident; detaching is always refused. */
static int
tegra210_car_detach(device_t dev)
{

	device_printf(dev, "Error: Clock driver cannot be detached\n");
	return (EBUSY);
}

static int
tegra210_car_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) {
		device_set_desc(dev, "Tegra Clock Driver");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
tegra210_car_attach(device_t dev)
{
	struct tegra210_car_softc *sc = device_get_softc(dev);
	int rid, rv;

	sc->dev = dev;
	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	/* Resource setup.
 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->mem_res) {
		device_printf(dev, "cannot allocate memory resource\n");
		rv = ENXIO;
		goto fail;
	}

	register_clocks(dev);
	/* Export this node as a hwreset provider for the FDT consumers. */
	hwreset_register_ofw_provider(dev);
	return (0);

fail:
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	return (rv);
}

/* hwreset interface: assert/deassert the module reset selected by 'id'. */
static int
tegra210_car_hwreset_assert(device_t dev, intptr_t id, bool value)
{
	struct tegra210_car_softc *sc = device_get_softc(dev);

	return (tegra210_hwreset_by_idx(sc, id, value));
}

static device_method_t tegra210_car_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, tegra210_car_probe),
	DEVMETHOD(device_attach, tegra210_car_attach),
	DEVMETHOD(device_detach, tegra210_car_detach),

	/* Clkdev interface*/
	DEVMETHOD(clkdev_read_4, tegra210_car_clkdev_read_4),
	DEVMETHOD(clkdev_write_4, tegra210_car_clkdev_write_4),
	DEVMETHOD(clkdev_modify_4, tegra210_car_clkdev_modify_4),
	DEVMETHOD(clkdev_device_lock, tegra210_car_clkdev_device_lock),
	DEVMETHOD(clkdev_device_unlock, tegra210_car_clkdev_device_unlock),

	/* Reset interface */
	DEVMETHOD(hwreset_assert, tegra210_car_hwreset_assert),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(car, tegra210_car_driver, tegra210_car_methods,
    sizeof(struct tegra210_car_softc));
/* Early attach: the clock tree must exist before timer-pass drivers. */
EARLY_DRIVER_MODULE(tegra210_car, simplebus, tegra210_car_driver, NULL, NULL,
    BUS_PASS_TIMER);
diff --git a/sys/arm64/nvidia/tegra210/tegra210_clk_per.c b/sys/arm64/nvidia/tegra210/tegra210_clk_per.c
index 0d84d42bb459..8918cfdd929c 100644
--- a/sys/arm64/nvidia/tegra210/tegra210_clk_per.c
+++ b/sys/arm64/nvidia/tegra210/tegra210_clk_per.c
@@ -1,968 +1,968 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2020 Michal Meloun
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include +#include #include #include #include "tegra210_car.h" /* Bits in base register. */ #define PERLCK_AMUX_MASK 0x0F #define PERLCK_AMUX_SHIFT 16 #define PERLCK_AMUX_DIS (1 << 20) #define PERLCK_UDIV_DIS (1 << 24) #define PERLCK_ENA_MASK (1 << 28) #define PERLCK_MUX_SHIFT 29 #define PERLCK_MUX_MASK 0x07 struct periph_def { struct clknode_init_def clkdef; uint32_t base_reg; uint32_t div_width; uint32_t div_mask; uint32_t div_f_width; uint32_t div_f_mask; uint32_t flags; }; struct pgate_def { struct clknode_init_def clkdef; uint32_t idx; uint32_t flags; }; #define PLIST(x) static const char *x[] #define GATE(_id, cname, plist, _idx) \ { \ .clkdef.id = TEGRA210_CLK_##_id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .idx = _idx, \ .flags = 0, \ } /* Sources for multiplexors. 
*/ PLIST(mux_N_N_c_N_p_N_a) = {"bogus", NULL, "pllC_out0", NULL, "pllP_out0", NULL, "pllA_out0", NULL}; PLIST(mux_N_N_p_N_N_N_clkm) = {NULL, NULL, "pllP_out0", NULL, NULL, NULL, "clk_m", NULL}; PLIST(mux_N_c_p_a1_c2_c3_clkm) = {NULL, "pllC_out0", "pllP_out0", "pllA1_out0", "pllC2_out0", "pllC3_out0", "clk_m", NULL}; PLIST(mux_N_c_p_a1_c2_c3_clkm_c4) = {NULL, "pllC_out0", "pllP_out0", "pllA1_out0", "pllC2_out0", "pllC3_out0", "clk_m", "pllC4_out0"}; PLIST(mux_N_c_p_clkm_N_c4_c4o1_c4o1) = {NULL, "pllC_out0", "pllP_out0", "clk_m", NULL, "pllC4_out0", "pllC4_out1", "pllC4_out1"}; PLIST(mux_N_c_p_clkm_N_c4_c4o1_c4o2) = {NULL, "pllC_out0", "pllP_out0", "clk_m", NULL, "pllC4_out0", "pllC4_out1", "pllC4_out2"}; PLIST(mux_N_c2_c_c3_p_N_a) = {NULL, "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", NULL, "pllA_out0", NULL}; PLIST(mux_N_c2_c_c3_p_clkm_a1_c4) = {NULL, "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", "clk_m", "pllA1_out0", "pllC4_out0"}; PLIST(mux_N_c2_c_c3_p_N_a1_clkm) = {NULL, "pllC2_out0", "pllC_out0", "pllC3_out0", "pllP_out0", NULL, "pllA1_out0", "clk_m"}; PLIST(mux_a_N_audio_N_p_N_clkm) = {"pllA_out0", NULL, "audio", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio0_N_p_N_clkm) = {"pllA_out0", NULL, "audio0", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio1_N_p_N_clkm) = {"pllA_out0", NULL, "audio1", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio2_N_p_N_clkm) = {"pllA_out0", NULL, "audio2", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio3_N_p_N_clkm) = {"pllA_out0", NULL, "audio3", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_N_audio4_N_p_N_clkm) = {"pllA_out0", NULL, "audio4", NULL, "pllP_out0", NULL, "clk_m"}; PLIST(mux_a_audiod1_p_clkm) = {"pllA_out0", "audiod1", "pllP_out0", "clk_m", NULL, NULL, NULL, NULL}; PLIST(mux_a_audiod2_p_clkm) = {"pllA_out0", "audiod2", "pllP_out0", "clk_m", NULL, NULL, NULL, NULL}; PLIST(mux_a_audiod3_p_clkm) = {"pllA_out0", "audiod3", "pllP_out0", "clk_m", NULL, NULL, NULL, NULL}; 
PLIST(mux_a_c4_c_c4o1_p_N_clkm_c4o2) = {"pllA_out0", "pllC4_out0", "pllC_out0", "pllC4_out1", "pllP_out0", NULL, "clk_m", "pllC4_out2"}; PLIST(mux_a_clks_p_clkm_e) = {"pllA_out0", "clk_s", "pllP_out0", "clk_m", "pllE_out0"}; PLIST(mux_c4o1_c2_c_c4_p_clkm_a_c4) = {"pllC4_out1", "pllC2_out0", "pllC_out0", "pllC4_out0", "pllP_out0", "clk_m","pllA_out0", "pllC4_out0", }; PLIST(mux_m_c_p_clkm_mud_mbud_mb_pud) = {"pllM_out0", "pllC_out0", "pllP_out0", "clk_m", "pllM_UD", "pllMB_UD", "pllMB_out0", "pllP_UD"}; PLIST(mux_p_N_N_c4o2_c4o1_N_clkm_c4) = {"pllP_out0", NULL, NULL, "pllC4_out2", "pllC4_out1", NULL, "clk_m", "pllC4_out0"}; PLIST(mux_p_N_c_c4_c4o1_c4o2_clkm) = {"pllP_out0", NULL, "pllC_out0", "pllC4_out0", "pllC4_out1", "pllC4_out2", "clk_m"}; PLIST(mux_p_N_c_c4_N_c4o1_clkm_c4o2) = {"pllP_out0", NULL, "pllC_out0", "pllC4_out0", NULL, "pllC4_out1", "clk_m", "pllC4_out2"}; PLIST(mux_p_N_d_N_N_d2_clkm) = {"pllP_out0", NULL, "pllD_out0", NULL, NULL, "pllD2_out0", "clk_m"}; PLIST(mux_p_N_clkm_N_clks_N_E) = {"pllP_out0", NULL, "clk_m", NULL, NULL, "clk_s", NULL, "pllE_out0"}; PLIST(mux_p_c_c2_N_c2_N_clkm) = {"pllP_out0", "pllC_out0", "pllC2_out0", NULL, "pllC2_out0", NULL, "clk_m", NULL}; PLIST(mux_p_co1_c_N_c4o2_c4o1_clkm_c4) = {"pllP_out0", "pllC_out1", "pllC_out0", NULL, "pllC4_out2", "pllC4_out1" ,"clk_m", "pllC4_out0"}; PLIST(mux_p_c2_c_c3_N_a1_clkm_c4) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", NULL, "pllA1_out0", "clk_m", "pllC4_out0"}; PLIST(mux_p_c2_c_c3_N_N_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", NULL, NULL, "clk_m", NULL}; PLIST(mux_p_c2_c_c3_m_e_clkm) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC3_out0", "pllM_out0", "pllE_out0", "clk_m"}; PLIST(mux_p_c2_c_c4_N_c4o1_clkm_c4o2) = {"pllP_out0", "pllC2_out0", "pllC4_out0", NULL, "pllC4_out1", "clk_m", "pllC4_out2"}; PLIST(mux_p_c2_c_c4_a_c4o1_clkm_c4o2) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC4_out0", "pllA_out0", "pllC4_out1", "clk_m", "pllC4_out2"}; 
PLIST(mux_p_c2_c_c4o2_c4o1_clks_clkm_c4) = {"pllP_out0", "pllC2_out0", "pllC4_out2", "pllC4_out1", "clk_s", "clk_m", "pllC4_out0"}; PLIST(mux_p_c2_c_c4_c4o1_clkm_c4o2) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC4_out0", "pllC4_out1", "clk_m", "pllC4_out2"}; PLIST(mux_p_c2_c_c4_clkm_c4o1_c4o2) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC4_out0", "clk_m", "pllC4_out1", "pllC4_out2"}; PLIST(mux_p_c2_c_c4_clks_c4o1_clkm_c4o2) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC4_out0", "clk_s", "pllC4_out1", "clk_m", "pllC4_out2"}; PLIST(mux_p_c2_c_c4_clkm_c4o1_clks_c4o2) = {"pllP_out0", "pllC2_out0", "pllC_out0", "pllC4_out0", "clk_m", "pllC4_out1", "clk_s", "pllC4_out2"}; PLIST(mux_p_c2_refe1_c3_m_a1_clkm_C4) = {"pllP_out0", "pllC2_out0", "pllREFE_out1", "pllC3_out0", "pllM_out0", "pllA1_out0", "clk_m", "pllC4_out0"}; PLIST(mux_p_c4_c_c4o1_N_c4o2_clkm) = {"pllP_out0", "pllC4_out0", "pllC_out0", "pllC4_out1", NULL, "pllC4_out2", "clk_m", NULL}; PLIST(mux_p_m_d_a_c_d2_clkm) = {"pllP_out0", "pllM_out0", "pllD_out0", "pllA_out0", "pllC_out0", "pllD2_out0", "clk_m"}; PLIST(mux_p_po3_clkm_clks_a) = {"pllP_out0", "pllP_out3", "clk_m", "clk_s", "pllA_out0", NULL, NULL, NULL}; PLIST(mux_po3_c_c2_clkm_p_c4_c4o1_c4o2) = {"pllP_out3", "pllC_out0", "pllC2_out0", "clk_m", "pllP_out0", "pllC4_out0", "pllC4_out1", "pllC4_out2"}; PLIST(mux_clkm_p_N_N_N_refre) = {"clk_m", "pllP_xusb", NULL, NULL, NULL, "pllREFE_out0", NULL, NULL}; PLIST(mux_clkm_N_u48_N_p_N_u480) = {"clk_m", NULL, "pllU_48", NULL, "pllP_out0", NULL, "pllU_480"}; PLIST(mux_clkm_refe_clks_u480) = {"clk_m", "pllREFE_out0", "clk_s", "pllU_480", NULL, NULL, NULL, NULL}; PLIST(mux_sep_audio) = {"pllA_out0", "pllC4_out0", "pllC_out0", "pllC4_out0", "pllP_out0", "pllC4_out0", "clk_m", NULL, "spdif_in", "i2s1", "i2s2", "i2s3", "i2s4", "i2s5", "pllA_out0", "ext_vimclk"}; static uint32_t clk_enable_reg[] = { CLK_OUT_ENB_L, CLK_OUT_ENB_H, CLK_OUT_ENB_U, CLK_OUT_ENB_V, CLK_OUT_ENB_W, CLK_OUT_ENB_X, CLK_OUT_ENB_Y, }; static 
uint32_t clk_reset_reg[] = { RST_DEVICES_L, RST_DEVICES_H, RST_DEVICES_U, RST_DEVICES_V, RST_DEVICES_W, RST_DEVICES_X, RST_DEVICES_Y, }; #define L(n) ((0 * 32) + (n)) #define H(n) ((1 * 32) + (n)) #define U(n) ((2 * 32) + (n)) #define V(n) ((3 * 32) + (n)) #define W(n) ((4 * 32) + (n)) #define X(n) ((5 * 32) + (n)) #define Y(n) ((6 * 32) + (n)) /* Clock IDs not yet defined in binding header file. */ #define TEGRA210_CLK_STAT_MON H(5) #define TEGRA210_CLK_IRAMA U(20) #define TEGRA210_CLK_IRAMB U(21) #define TEGRA210_CLK_IRAMC U(22) #define TEGRA210_CLK_IRAMD U(23) #define TEGRA210_CLK_CRAM2 U(24) #define TEGRA210_CLK_M_DOUBLER U(26) #define TEGRA210_CLK_DEVD2_OUT U(29) #define TEGRA210_CLK_DEVD1_OUT U(30) #define TEGRA210_CLK_CPUG V(0) #define TEGRA210_CLK_ATOMICS V(16) #define TEGRA210_CLK_PCIERX0 W(2) #define TEGRA210_CLK_PCIERX1 W(3) #define TEGRA210_CLK_PCIERX2 W(4) #define TEGRA210_CLK_PCIERX3 W(5) #define TEGRA210_CLK_PCIERX4 W(6) #define TEGRA210_CLK_PCIERX5 W(7) #define TEGRA210_CLK_PCIE2_IOBIST W(9) #define TEGRA210_CLK_EMC_IOBIST W(10) #define TEGRA210_CLK_SATA_IOBIST W(12) #define TEGRA210_CLK_MIPI_IOBIST W(13) #define TEGRA210_CLK_EMC_LATENCY W(29) #define TEGRA210_CLK_MC1 W(30) #define TEGRA210_CLK_ETR X(3) #define TEGRA210_CLK_CAM_MCLK X(4) #define TEGRA210_CLK_CAM_MCLK2 X(5) #define TEGRA210_CLK_MC_CAPA X(7) #define TEGRA210_CLK_MC_CBPA X(8) #define TEGRA210_CLK_MC_CPU X(9) #define TEGRA210_CLK_MC_BBC X(10) #define TEGRA210_CLK_EMC_DLL X(14) #define TEGRA210_CLK_UART_FST_MIPI_CAL X(17) #define TEGRA210_CLK_HPLL_ADSP X(26) #define TEGRA210_CLK_PLLP_ADSP X(27) #define TEGRA210_CLK_PLLA_ADSP X(28) #define TEGRA210_CLK_PLLG_REF X(29) #define TEGRA210_CLK_AXIAP Y(4) #define TEGRA210_CLK_MC_CDPA Y(8) #define TEGRA210_CLK_MC_CCPA Y(9) static struct pgate_def pgate_def[] = { /* bank L -> 0-31 */ GATE(ISPB, "ispb", "clk_m", L(3)), GATE(RTC, "rtc", "clk_s", L(4)), GATE(TIMER, "timer", "clk_m", L(5)), GATE(UARTA, "uarta", "pc_uarta" , L(6)), GATE(UARTB, "uartb", 
"pc_uartb", L(7)), GATE(GPIO, "gpio", "clk_m", L(8)), GATE(SDMMC2, "sdmmc2", "pc_sdmmc2", L(9)), GATE(SPDIF_OUT, "spdif_out", "pc_spdif_out", L(10)), GATE(SPDIF_IN, "spdif_in", "pc_spdif_in", L(10)), GATE(I2S1, "i2s2", "pc_i2s2", L(11)), GATE(I2C1, "i2c1", "pc_i2c1", L(12)), GATE(SDMMC1, "sdmmc1", "pc_sdmmc1", L(14)), GATE(SDMMC4, "sdmmc4", "pc_sdmmc4", L(15)), GATE(PWM, "pwm", "pc_pwm", L(17)), GATE(I2S2, "i2s3", "pc_i2s3", L(18)), GATE(VI, "vi", "pc_vi", L(20)), GATE(USBD, "usbd", "clk_m", L(22)), GATE(ISP, "isp", "pc_isp", L(23)), GATE(DISP2, "disp2", "pc_disp2", L(26)), GATE(DISP1, "disp1", "pc_disp1", L(27)), GATE(HOST1X, "host1x", "pc_host1x", L(28)), GATE(I2S0, "i2s1", "pc_i2s1", L(30)), /* bank H -> 32-63 */ GATE(MC, "mem", "clk_m", H(0)), GATE(AHBDMA, "ahbdma", "clk_m", H(1)), GATE(APBDMA, "apbdma", "clk_m", H(2)), GATE(STAT_MON, "stat_mon", "clk_s", H(5)), GATE(PMC, "pmc", "clk_s", H(6)), GATE(FUSE, "fuse", "clk_m", H(7)), GATE(KFUSE, "kfuse", "clk_m", H(8)), GATE(SBC1, "spi1", "pc_spi1", H(9)), GATE(SBC2, "spi2", "pc_spi2", H(12)), GATE(SBC3, "spi3", "pc_spi3", H(14)), GATE(I2C5, "i2c5", "pc_i2c5", H(15)), GATE(DSIA, "dsia", "pllD_dsi_csi", H(16)), GATE(CSI, "csi", "pllP_out3", H(20)), GATE(I2C2, "i2c2", "pc_i2c2", H(22)), GATE(UARTC, "uartc", "pc_uartc", H(23)), GATE(MIPI_CAL, "mipi_cal", "clk_m", H(24)), GATE(EMC, "emc", "pc_emc", H(25)), GATE(USB2, "usb2", "clk_m", H(26)), GATE(BSEV, "bsev", "clk_m", H(31)), /* bank U -> 64-95 */ GATE(UARTD, "uartd", "pc_uartd", U(1)), GATE(I2C3, "i2c3", "pc_i2c3", U(3)), GATE(SBC4, "spi4", "pc_spi4", U(4)), GATE(SDMMC3, "sdmmc3", "pc_sdmmc3", U(5)), GATE(PCIE, "pcie", "clk_m", U(6)), GATE(AFI, "afi", "clk_m", U(8)), GATE(CSITE, "csite", "pc_csite", U(9)), GATE(SOC_THERM, "soc_therm", "pc_soc_therm", U(14)), GATE(DTV, "dtv", "clk_m", U(15)), GATE(I2CSLOW, "i2c_slow", "pc_i2c_slow", U(17)), GATE(DSIB, "dsib", "pllD_dsi_csi", U(18)), GATE(TSEC, "tsec", "pc_tsec", U(19)), GATE(IRAMA, "irama", "clk_m", U(20)), GATE(IRAMB, 
"iramb", "clk_m", U(21)), GATE(IRAMC, "iramc", "clk_m", U(22)), GATE(IRAMD, "iramd", "clk_m", U(23)), GATE(CRAM2, "cram2", "clk_m", U(24)), GATE(XUSB_HOST, "xusb_host", "pc_xusb_core_host", U(25)), GATE(M_DOUBLER, "m_doubler", "clk_m", U(26)), GATE(CSUS, "sus_out", "clk_m", U(28)), GATE(DEVD2_OUT, "devd2_out", "clk_m", U(29)), GATE(DEVD1_OUT, "devd1_out", "clk_m", U(30)), GATE(XUSB_DEV, "xusb_core_dev", "pc_xusb_core_dev", U(31)), /* bank V -> 96-127 */ GATE(CPUG, "cpug", "clk_m", V(0)), GATE(MSELECT, "mselect", "pc_mselect", V(3)), GATE(TSENSOR, "tsensor", "pc_tsensor", V(4)), GATE(I2S4, "i2s5", "pc_i2s5", V(5)), GATE(I2S3, "i2s4", "pc_i2s4", V(6)), GATE(I2C4, "i2c4", "pc_i2c4", V(7)), GATE(D_AUDIO, "ahub", "pc_ahub", V(10)), GATE(APB2APE, "apb2ape", "clk_m", V(11)), GATE(HDA2CODEC_2X, "hda2codec_2x", "pc_hda2codec_2x", V(15)), GATE(ATOMICS, "atomics", "clk_m", V(16)), GATE(SPDIF_2X, "spdif_doubler", "clk_m", V(22)), GATE(ACTMON, "actmon", "pc_actmon", V(23)), GATE(EXTERN1, "extperiph1", "pc_extperiph1", V(24)), GATE(EXTERN2, "extperiph2", "pc_extperiph2", V(25)), GATE(EXTERN3, "extperiph3", "pc_extperiph3", V(26)), GATE(SATA_OOB, "sata_oob", "pc_sata_oob", V(27)), GATE(SATA, "sata", "pc_sata", V(28)), GATE(HDA, "hda", "pc_hda", V(29)), /* bank W -> 128-159*/ GATE(HDA2HDMI, "hda2hdmi", "clk_m", W(0)), /* GATE(SATA_COLD, "sata_cold", "clk_m", W(1)),*/ /* Reset only */ GATE(PCIERX0, "pcierx0", "clk_m", W(2)), GATE(PCIERX1, "pcierx1", "clk_m", W(3)), GATE(PCIERX2, "pcierx2", "clk_m", W(4)), GATE(PCIERX3, "pcierx3", "clk_m", W(5)), GATE(PCIERX4, "pcierx4", "clk_m", W(6)), GATE(PCIERX5, "pcierx5", "clk_m", W(7)), GATE(CEC, "cec", "clk_m", W(8)), GATE(PCIE2_IOBIST, "pcie2_iobist", "clk_m", W(9)), GATE(EMC_IOBIST, "emc_iobist", "clk_m", W(10)), GATE(SATA_IOBIST, "sata_iobist", "clk_m", W(12)), GATE(MIPI_IOBIST, "mipi_iobist", "clk_m", W(13)), GATE(XUSB_GATE, "xusb_gate", "clk_m", W(15)), GATE(CILAB, "cilab", "pc_cilab", W(16)), GATE(CILCD, "cilcd", "pc_cilcd", W(17)), 
GATE(CILE, "cilef", "pc_cilef", W(18)), GATE(DSIALP, "dsia_lp", "pc_dsia_lp", W(19)), GATE(DSIBLP, "dsib_lp", "pc_dsib_lp", W(20)), GATE(ENTROPY, "entropy", "pc_entropy", W(21)), GATE(DFLL_REF, "dvfs_ref", "pc_dvfs_ref", W(27)), GATE(DFLL_SOC, "dvfs_soc", "pc_dvfs_soc", W(27)), GATE(XUSB_SS, "xusb_ss", "pc_xusb_ss", W(28)), GATE(EMC_LATENCY, "emc_latency", "pc_emc_latency", W(29)), GATE(MC1, "mc1", "clk_m", W(30)), /* bank X -> 160-191*/ /*GATE(SPARE, "spare", "clk_m", X(0)), */ GATE(DMIC1, "dmic1", "clk_m", X(1)), GATE(DMIC2, "dmic2", "clk_m", X(2)), GATE(ETR, "etr", "clk_m", X(3)), GATE(CAM_MCLK, "CAM_MCLK", "clk_m", X(4)), GATE(CAM_MCLK2, "CAM_MCLK2", "clk_m", X(5)), GATE(I2C6, "i2c6", "pc_i2c6", X(6)), GATE(MC_CAPA, "mc_capa", "clk_m", X(7)), GATE(MC_CBPA, "mc_cbpa", "clk_m", X(8)), GATE(MC_CPU, "mc_cpu", "clk_m", X(9)), GATE(MC_BBC, "mc_bbc", "clk_m", X(10)), GATE(VIM2_CLK, "vim2_clk", "clk_m", X(11)), GATE(MIPIBIF, "mipibif", "clk_m", X(13)), GATE(EMC_DLL, "emc_dll", "pc_emc_dll", X(14)), GATE(UART_FST_MIPI_CAL, "uart_fst_mipi_cal", "clk_m", X(17)), GATE(VIC03, "vic", "pc_vic", X(18)), GATE(DPAUX, "dpaux", "dpaux_div", X(21)), GATE(SOR0, "sor0", "pc_sor0", X(22)), GATE(SOR1, "sor1", "pc_sor1", X(23)), GATE(GPU, "gpu", "osc_div_clk", X(24)), GATE(DBGAPB, "dbgapb", "clk_m", X(25)), GATE(HPLL_ADSP, "hpll_adsp", "clk_m", X(26)), GATE(PLLP_ADSP, "pllp_adsp", "clk_m", X(27)), GATE(PLLA_ADSP, "plla_adsp", "clk_m", X(28)), GATE(PLLG_REF, "pllg_ref", "clk_m", X(29)), /* bank Y -> 192-224*/ /* GATE(SPARE1, "spare1", "clk_m", Y(0)), */ GATE(SDMMC_LEGACY, "sdmmc_legacy_tm", "pc_sdmmc_legacy_tm", Y(1)), GATE(NVDEC, "nvdec", "pc_nvdec", Y(2)), GATE(NVJPG, "nvjpg", "clk_m", Y(3)), GATE(AXIAP, "axiap", "clk_m", Y(4)), GATE(DMIC3, "dmic3", "clk_m", Y(5)), GATE(APE, "ape", "clk_m", Y(6)), GATE(ADSP, "adsp", "clk_m", Y(7)), GATE(MC_CDPA, "mc_cdpa", "clk_m", Y(8)), GATE(MC_CCPA, "mc_ccpa", "clk_m", Y(9)), GATE(MAUD, "mc_maud", "clk_m", Y(10)), GATE(TSECB, "tsecb", "clk_m", 
Y(14)), GATE(DPAUX1, "dpaux1", "dpaux1_div", Y(15)), GATE(VI_I2C, "vi_i2c", "clk_m", Y(16)), GATE(HSIC_TRK, "hsic_trk", "clk_m", Y(17)), GATE(USB2_TRK, "usb2_trk", "clk_m", Y(18)), GATE(QSPI, "qspi", "clk_m", Y(19)), GATE(UARTAPE, "uarape", "clk_m", Y(20)), GATE(ADSP_NEON, "adspneon", "clk_m", Y(26)), GATE(NVENC, "nvenc", "clk_m", Y(27)), GATE(IQC2, "iqc2", "clk_m", Y(28)), GATE(IQC1, "iqc1", "clk_m", Y(29)), GATE(SOR_SAFE, "sor_safe", "sor_safe_div", Y(30)), GATE(PLL_P_OUT_CPU, "pllp_out_cpu", "clk_m", Y(31)), }; /* Peripheral clock clock */ #define DCF_HAVE_MUX 0x0100 /* Block with multipexor */ #define DCF_HAVE_ENA 0x0200 /* Block with enable bit */ #define DCF_HAVE_DIV 0x0400 /* Block with divider */ /* Mark block with additional bits / functionality. */ #define DCF_IS_MASK 0x00FF #define DCF_IS_UART 0x0001 #define DCF_IS_VI 0x0002 #define DCF_IS_HOST1X 0x0003 #define DCF_IS_XUSB_SS 0x0004 #define DCF_IS_EMC_DLL 0x0005 #define DCF_IS_SATA 0x0006 #define DCF_IS_VIC 0x0007 #define DCF_IS_AHUB 0x0008 #define DCF_IS_SOR0 0x0009 #define DCF_IS_EMC 0x000A #define DCF_IS_QSPI 0x000B #define DCF_IS_EMC_SAFE 0x000C /* Basic pheripheral clock */ #define PER_CLK(_id, cn, pl, r, diw, fiw, f) \ { \ .clkdef.id = _id, \ .clkdef.name = cn, \ .clkdef.parent_names = pl, \ .clkdef.parent_cnt = nitems(pl), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_reg = r, \ .div_width = diw, \ .div_f_width = fiw, \ .flags = f, \ } /* Mux with fractional 8.1 divider. */ #define CLK_8_1(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 8, 1, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux with integer 8bits divider. */ #define CLK_8_0(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 8, 0, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux with fractional 16.1 divider. */ #define CLK16_1(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 16, 1, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux with integer 16bits divider. 
*/ #define CLK16_0(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 16, 0, (f) | DCF_HAVE_MUX | DCF_HAVE_DIV) /* Mux wihout divider. */ #define CLK_0_0(id, cn, pl, r, f) \ PER_CLK(id, cn, pl, r, 0, 0, (f) | DCF_HAVE_MUX) static struct periph_def periph_def[] = { CLK_8_1(0, "pc_i2s2", mux_a_N_audio1_N_p_N_clkm, CLK_SOURCE_I2S2, DCF_HAVE_ENA), CLK_8_1(0, "pc_i2s3", mux_a_N_audio2_N_p_N_clkm, CLK_SOURCE_I2S3, DCF_HAVE_ENA), CLK_8_1(0, "pc_spdif_out", mux_a_N_audio_N_p_N_clkm, CLK_SOURCE_SPDIF_OUT, 0), CLK_8_1(0, "pc_spdif_in", mux_p_c2_c_c4_clkm_c4o1_c4o2, CLK_SOURCE_SPDIF_IN, 0), CLK_8_1(0, "pc_pwm", mux_p_c2_c_c4_clks_c4o1_clkm_c4o2, CLK_SOURCE_PWM, 0), CLK_8_1(0, "pc_spi2", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_SPI2, 0), CLK_8_1(0, "pc_spi3", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_SPI3, 0), CLK16_0(0, "pc_i2c1", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C1, 0), CLK16_0(0, "pc_i2c5", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C5, 0), CLK_8_1(0, "pc_spi1", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_SPI1, 0), CLK_0_0(0, "pc_disp1", mux_p_N_d_N_N_d2_clkm, CLK_SOURCE_DISP1, 0), CLK_0_0(0, "pc_disp2", mux_p_N_d_N_N_d2_clkm, CLK_SOURCE_DISP2, 0), CLK_8_1(0, "pc_isp", mux_N_c_p_a1_c2_c3_clkm_c4, CLK_SOURCE_ISP, 0), CLK_8_1(0, "pc_vi", mux_N_c2_c_c3_p_clkm_a1_c4, CLK_SOURCE_VI, DCF_IS_VI), CLK_8_1(0, "pc_sdmmc1", mux_p_N_N_c4o2_c4o1_N_clkm_c4, CLK_SOURCE_SDMMC1, 0), CLK_8_1(0, "pc_sdmmc2", mux_p_N_N_c4o2_c4o1_N_clkm_c4, CLK_SOURCE_SDMMC2, 0), CLK_8_1(0, "pc_sdmmc4", mux_p_N_N_c4o2_c4o1_N_clkm_c4, CLK_SOURCE_SDMMC4, 0), CLK16_1(0, "pc_uarta", mux_p_c2_c_c4_c4o1_clkm_c4o2, CLK_SOURCE_UARTA, DCF_IS_UART), CLK16_1(0, "pc_uartb", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_UARTB, DCF_IS_UART), CLK_8_1(0, "pc_host1x", mux_c4o1_c2_c_c4_p_clkm_a_c4, CLK_SOURCE_HOST1X, DCF_IS_HOST1X), CLK16_0(0, "pc_i2c2", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C2, 0), CLK_8_1(0, "pc_emc", mux_m_c_p_clkm_mud_mbud_mb_pud, CLK_SOURCE_EMC, DCF_IS_EMC), CLK16_1(0, "pc_uartc", 
mux_p_c2_c_c4_c4o1_clkm_c4o2, CLK_SOURCE_UARTC, DCF_IS_UART), CLK_8_1(0, "pc_vi_sensor", mux_N_c2_c_c3_p_N_a, CLK_SOURCE_VI_SENSOR, 0), CLK_8_1(0, "pc_spi4", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_SPI4, 0), CLK16_0(0, "pc_i2c3", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C3, 0), CLK_8_1(0, "pc_sdmmc3", mux_p_c2_c_c3_m_e_clkm, CLK_SOURCE_SDMMC3, 0), CLK16_1(0, "pc_uartd", mux_p_c2_c_c4_c4o1_clkm_c4o2, CLK_SOURCE_UARTD, DCF_IS_UART), CLK_8_1(0, "pc_csite", mux_p_c2_refe1_c3_m_a1_clkm_C4, CLK_SOURCE_CSITE, 0), CLK_8_1(0, "pc_i2s1", mux_a_N_audio0_N_p_N_clkm, CLK_SOURCE_I2S1, 0), /* DTV xxx */ CLK_8_1(0, "pc_tsec", mux_p_c2_c_c3_N_a1_clkm_c4, CLK_SOURCE_TSEC, 0), /* SPARE2 */ CLK_8_1(0, "pc_mselect", mux_p_c2_c_c4o2_c4o1_clks_clkm_c4, CLK_SOURCE_MSELECT, 0), CLK_8_1(0, "pc_tsensor", mux_p_c2_c_c4_clkm_c4o1_clks_c4o2, CLK_SOURCE_TSENSOR, 0), CLK_8_1(0, "pc_i2s4", mux_a_N_audio3_N_p_N_clkm, CLK_SOURCE_I2S3, DCF_HAVE_ENA), CLK_8_1(0, "pc_i2s5", mux_a_N_audio4_N_p_N_clkm, CLK_SOURCE_I2S4, DCF_HAVE_ENA), CLK16_0(0, "pc_i2c4", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C4, 0), CLK_8_1(0, "pc_ahub", mux_sep_audio, CLK_SOURCE_AHUB, DCF_IS_AHUB), CLK_8_1(0, "pc_hda2codec_2x", mux_p_c2_c_c4_a_c4o1_clkm_c4o2, CLK_SOURCE_HDA2CODEC_2X, 0), CLK_8_1(0, "pc_actmon", mux_p_c2_c_c4_clks_c4o1_clkm_c4o2, CLK_SOURCE_ACTMON, 0), CLK_8_1(0, "pc_extperiph1", mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH1, 0), CLK_8_1(0, "pc_extperiph2", mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH2, 0), CLK_8_1(0, "pc_extperiph3", mux_a_clks_p_clkm_e, CLK_SOURCE_EXTPERIPH3, 0), CLK_8_1(0, "pc_i2c_slow", mux_p_c2_c_c4_clks_c4o1_clkm_c4o2, CLK_SOURCE_I2C_SLOW, 0), /* SYS */ CLK_8_1(0, "pc_ispb", mux_N_N_c_N_p_N_a, CLK_SOURCE_ISPB, 0), CLK_8_1(0, "pc_sor1", mux_p_N_d_N_N_d2_clkm, CLK_SOURCE_SOR1, DCF_IS_SOR0), CLK_8_1(0, "pc_sor0", mux_p_m_d_a_c_d2_clkm, CLK_SOURCE_SOR0, DCF_IS_SOR0), CLK_8_1(0, "pc_sata_oob", mux_p_c4_c_c4o1_N_c4o2_clkm, CLK_SOURCE_SATA_OOB, 0), CLK_8_1(0, "pc_sata", 
mux_p_N_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_SATA, DCF_IS_SATA), CLK_8_1(0, "pc_hda", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_HDA, 0), CLK_8_1(TEGRA210_CLK_XUSB_HOST_SRC, "pc_xusb_core_host", mux_clkm_p_N_N_N_refre, CLK_SOURCE_XUSB_CORE_HOST, 0), CLK_8_1(TEGRA210_CLK_XUSB_FALCON_SRC, "pc_xusb_falcon", mux_clkm_p_N_N_N_refre, CLK_SOURCE_XUSB_FALCON, 0), CLK_8_1(TEGRA210_CLK_XUSB_FS_SRC, "pc_xusb_fs", mux_clkm_N_u48_N_p_N_u480, CLK_SOURCE_XUSB_FS, 0), CLK_8_1(TEGRA210_CLK_XUSB_DEV_SRC, "pc_xusb_core_dev", mux_clkm_p_N_N_N_refre, CLK_SOURCE_XUSB_CORE_DEV, 0), CLK_8_1(TEGRA210_CLK_XUSB_SS_SRC, "pc_xusb_ss", mux_clkm_refe_clks_u480, CLK_SOURCE_XUSB_SS, DCF_IS_XUSB_SS), CLK_8_1(0, "pc_cilab", mux_p_N_c_c4_c4o1_c4o2_clkm, CLK_SOURCE_CILAB, 0), CLK_8_1(0, "pc_cilcd", mux_p_N_c_c4_c4o1_c4o2_clkm, CLK_SOURCE_CILCD, 0), CLK_8_1(0, "pc_cilef", mux_p_N_c_c4_c4o1_c4o2_clkm, CLK_SOURCE_CILEF, 0), CLK_8_1(0, "pc_dsia_lp", mux_p_N_c_c4_c4o1_c4o2_clkm, CLK_SOURCE_DSIA_LP, 0), CLK_8_1(0, "pc_dsib_lp", mux_p_N_c_c4_c4o1_c4o2_clkm, CLK_SOURCE_DSIB_LP, 0), CLK_8_1(0, "pc_entropy", mux_p_N_clkm_N_clks_N_E, CLK_SOURCE_ENTROPY, 0), CLK_8_1(0, "pc_dvfs_ref", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_DVFS_REF, DCF_HAVE_ENA), CLK_8_1(0, "pc_dvfs_soc", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_DVFS_SOC, DCF_HAVE_ENA), CLK_8_1(0, "pc_emc_latency", mux_N_c_p_clkm_N_c4_c4o1_c4o2, CLK_SOURCE_EMC_LATENCY, 0), CLK_8_1(0, "pc_soc_therm", mux_N_c_p_clkm_N_c4_c4o1_c4o1, CLK_SOURCE_SOC_THERM, 0), CLK_8_1(0, "pc_dmic1", mux_a_audiod1_p_clkm, CLK_SOURCE_DMIC1, 0), CLK_8_1(0, "pc_dmic2", mux_a_audiod2_p_clkm, CLK_SOURCE_DMIC2, 0), CLK_8_1(0, "pc_vi_sensor2", mux_N_c2_c_c3_p_N_a, CLK_SOURCE_VI_SENSOR2, 0), CLK16_0(0, "pc_i2c6", mux_p_c2_c_c4_N_c4o1_clkm_c4o2, CLK_SOURCE_I2C6, 0), /* MIPIBIF */ CLK_8_1(0, "pc_emc_dll", mux_m_c_p_clkm_mud_mbud_mb_pud, CLK_SOURCE_EMC_DLL, DCF_IS_EMC_DLL), CLK_8_1(0, "pc_uart_fst_mipi_cal", mux_p_c_c2_N_c2_N_clkm, CLK_SOURCE_UART_FST_MIPI_CAL, 0), CLK_8_1(0, "pc_vic", 
mux_N_c_p_a1_c2_c3_clkm, CLK_SOURCE_VIC, DCF_IS_VIC), CLK_8_1(0, "pc_sdmmc_legacy_tm", mux_po3_c_c2_clkm_p_c4_c4o1_c4o2, CLK_SOURCE_SDMMC_LEGACY_TM, 0), CLK_8_1(0, "pc_nvdec", mux_N_c2_c_c3_p_N_a1_clkm, CLK_SOURCE_NVDEC, 0), CLK_8_1(0, "pc_nvjpg", mux_N_c2_c_c3_p_N_a1_clkm, CLK_SOURCE_NVJPG, 0), CLK_8_1(0, "pc_nvenc", mux_N_c2_c_c3_p_N_a1_clkm, CLK_SOURCE_NVENC, 0), CLK_8_1(0, "pc_dmic3", mux_a_audiod3_p_clkm, CLK_SOURCE_DMIC3, 0), CLK_8_1(0, "pc_ape", mux_a_c4_c_c4o1_p_N_clkm_c4o2, CLK_SOURCE_APE, 0), CLK_8_1(0, "pc_qspi", mux_p_co1_c_N_c4o2_c4o1_clkm_c4, CLK_SOURCE_QSPI, DCF_IS_QSPI), CLK_8_1(0, "pc_vi_i2c", mux_p_c2_c_c3_N_N_clkm, CLK_SOURCE_VI_I2C, 0), /* USB2_HSIC_TRK */ CLK_8_0(0, "pc_maud", mux_p_po3_clkm_clks_a, CLK_SOURCE_MAUD, 0), CLK_8_1(0, "pc_tsecb", mux_p_c2_c_c3_N_a1_clkm_c4, CLK_SOURCE_TSECB, 0), CLK_8_1(0, "pc_uartape", mux_p_c2_c_c3_N_N_clkm, CLK_SOURCE_UARTAPE, 0), CLK_8_1(0, "pc_dbgapb", mux_N_N_p_N_N_N_clkm, CLK_SOURCE_DBGAPB, 0), CLK_8_1(0, "pc_emc_safe", mux_m_c_p_clkm_mud_mbud_mb_pud, CLK_SOURCE_EMC_SAFE, DCF_IS_EMC_SAFE), }; static int periph_init(struct clknode *clk, device_t dev); static int periph_recalc(struct clknode *clk, uint64_t *freq); static int periph_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop); static int periph_set_mux(struct clknode *clk, int idx); struct periph_sc { device_t clkdev; uint32_t base_reg; uint32_t div_shift; uint32_t div_width; uint32_t div_mask; uint32_t div_f_width; uint32_t div_f_mask; uint32_t flags; uint32_t divider; int mux; }; static clknode_method_t periph_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, periph_init), CLKNODEMETHOD(clknode_recalc_freq, periph_recalc), CLKNODEMETHOD(clknode_set_freq, periph_set_freq), CLKNODEMETHOD(clknode_set_mux, periph_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra210_periph, tegra210_periph_class, periph_methods, sizeof(struct periph_sc), clknode_class); static int periph_init(struct clknode *clk, device_t dev) 
{ struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); if (sc->flags & DCF_HAVE_ENA) MD4(sc, sc->base_reg, PERLCK_ENA_MASK, PERLCK_ENA_MASK); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); /* Stnadard mux. */ if (sc->flags & DCF_HAVE_MUX) sc->mux = (reg >> PERLCK_MUX_SHIFT) & PERLCK_MUX_MASK; else sc->mux = 0; if (sc->flags & DCF_HAVE_DIV) sc->divider = (reg & sc->div_mask) + 2; else sc->divider = 1; if ((sc->flags & DCF_IS_MASK) == DCF_IS_UART) { if (!(reg & PERLCK_UDIV_DIS)) sc->divider = 2; } /* AUDIO MUX */ if ((sc->flags & DCF_IS_MASK) == DCF_IS_AHUB) { if (!(reg & PERLCK_AMUX_DIS) && (sc->mux == 7)) { sc->mux = 8 + ((reg >> PERLCK_AMUX_SHIFT) & PERLCK_MUX_MASK); } } clknode_init_parent_idx(clk, sc->mux); return(0); } static int periph_set_mux(struct clknode *clk, int idx) { struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); if (!(sc->flags & DCF_HAVE_MUX)) return (ENXIO); sc->mux = idx; DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); reg &= ~(PERLCK_MUX_MASK << PERLCK_MUX_SHIFT); if ((sc->flags & DCF_IS_MASK) == DCF_IS_AHUB) { reg &= ~PERLCK_AMUX_DIS; reg &= ~(PERLCK_MUX_MASK << PERLCK_AMUX_SHIFT); if (idx <= 7) { reg |= idx << PERLCK_MUX_SHIFT; } else { reg |= 7 << PERLCK_MUX_SHIFT; reg |= (idx - 8) << PERLCK_AMUX_SHIFT; } } else { reg |= idx << PERLCK_MUX_SHIFT; } WR4(sc, sc->base_reg, reg); DEVICE_UNLOCK(sc); return(0); } static int periph_recalc(struct clknode *clk, uint64_t *freq) { struct periph_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); if (sc->flags & DCF_HAVE_DIV) { DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); *freq = (*freq << sc->div_f_width) / sc->divider; } return (0); } static int periph_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct periph_sc *sc; uint64_t tmp, divider; sc = clknode_get_softc(clk); if (!(sc->flags & DCF_HAVE_DIV)) { *stop = 0; return (0); } tmp = fin << sc->div_f_width; divider = tmp / *fout; if ((tmp % *fout) != 0) 
divider++; if (divider < (1 << sc->div_f_width)) divider = 1 << (sc->div_f_width - 1); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (tmp / divider))) return (ERANGE); } else { DEVICE_LOCK(sc); MD4(sc, sc->base_reg, sc->div_mask, (divider - (1 << sc->div_f_width))); DEVICE_UNLOCK(sc); sc->divider = divider; } *fout = tmp / divider; *stop = 1; return (0); } static int periph_register(struct clkdom *clkdom, struct periph_def *clkdef) { struct clknode *clk; struct periph_sc *sc; clk = clknode_create(clkdom, &tegra210_periph_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->base_reg = clkdef->base_reg; sc->div_width = clkdef->div_width; sc->div_mask = (1 <div_width) - 1; sc->div_f_width = clkdef->div_f_width; sc->div_f_mask = (1 <div_f_width) - 1; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } /* -------------------------------------------------------------------------- */ static int pgate_init(struct clknode *clk, device_t dev); static int pgate_set_gate(struct clknode *clk, bool enable); static int pgate_get_gate(struct clknode *clk, bool *enabled); struct pgate_sc { device_t clkdev; uint32_t idx; uint32_t flags; uint32_t enabled; }; static clknode_method_t pgate_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, pgate_init), CLKNODEMETHOD(clknode_set_gate, pgate_set_gate), CLKNODEMETHOD(clknode_get_gate, pgate_get_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra210_pgate, tegra210_pgate_class, pgate_methods, sizeof(struct pgate_sc), clknode_class); static uint32_t get_enable_reg(int idx) { KASSERT(idx / 32 < nitems(clk_enable_reg), ("Invalid clock index for enable: %d", idx)); return (clk_enable_reg[idx / 32]); } static uint32_t get_reset_reg(int idx) { KASSERT(idx / 32 < nitems(clk_reset_reg), ("Invalid clock index for reset: %d", idx)); return (clk_reset_reg[idx / 32]); } static int 
pgate_init(struct clknode *clk, device_t dev) { struct pgate_sc *sc; uint32_t ena_reg, rst_reg, mask; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); DEVICE_LOCK(sc); RD4(sc, get_enable_reg(sc->idx), &ena_reg); RD4(sc, get_reset_reg(sc->idx), &rst_reg); DEVICE_UNLOCK(sc); sc->enabled = ena_reg & mask ? 1 : 0; clknode_init_parent_idx(clk, 0); return(0); } static int pgate_set_gate(struct clknode *clk, bool enable) { struct pgate_sc *sc; uint32_t reg, mask, base_reg; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); sc->enabled = enable; base_reg = get_enable_reg(sc->idx); DEVICE_LOCK(sc); MD4(sc, base_reg, mask, enable ? mask : 0); RD4(sc, base_reg, ®); DEVICE_UNLOCK(sc); DELAY(2); return(0); } static int pgate_get_gate(struct clknode *clk, bool *enabled) { struct pgate_sc *sc; uint32_t reg, mask, base_reg; sc = clknode_get_softc(clk); mask = 1 << (sc->idx % 32); base_reg = get_enable_reg(sc->idx); DEVICE_LOCK(sc); RD4(sc, base_reg, ®); DEVICE_UNLOCK(sc); *enabled = reg & mask ? true: false; return(0); } int tegra210_hwreset_by_idx(struct tegra210_car_softc *sc, intptr_t idx, bool reset) { uint32_t reg, mask, reset_reg; CLKDEV_DEVICE_LOCK(sc->dev); if (idx == TEGRA210_RST_DFLL_DVCO) { CLKDEV_MODIFY_4(sc->dev, DFLL_BASE, DFLL_BASE_DVFS_DFLL_RESET, reset ? DFLL_BASE_DVFS_DFLL_RESET : 0); CLKDEV_READ_4(sc->dev, DFLL_BASE, ®); } if (idx == TEGRA210_RST_ADSP) { reset_reg = (reset) ? RST_DEV_Y_SET: RST_DEV_Y_CLR; mask = (0x1F << 22) |(1 << 7); CLKDEV_WRITE_4(sc->dev, reset_reg, mask); CLKDEV_READ_4(sc->dev, reset_reg, ®); } else { mask = 1 << (idx % 32); reset_reg = get_reset_reg(idx); CLKDEV_MODIFY_4(sc->dev, reset_reg, mask, reset ? 
mask : 0); CLKDEV_READ_4(sc->dev, reset_reg, ®); } CLKDEV_DEVICE_UNLOCK(sc->dev); return(0); } static int pgate_register(struct clkdom *clkdom, struct pgate_def *clkdef) { struct clknode *clk; struct pgate_sc *sc; clk = clknode_create(clkdom, &tegra210_pgate_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->idx = clkdef->idx; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } void tegra210_periph_clock(struct tegra210_car_softc *sc) { int i, rv; for (i = 0; i < nitems(periph_def); i++) { rv = periph_register(sc->clkdom, &periph_def[i]); if (rv != 0) panic("tegra210_periph_register failed"); } for (i = 0; i < nitems(pgate_def); i++) { rv = pgate_register(sc->clkdom, &pgate_def[i]); if (rv != 0) panic("tegra210_pgate_register failed"); } } diff --git a/sys/arm64/nvidia/tegra210/tegra210_clk_pll.c b/sys/arm64/nvidia/tegra210/tegra210_clk_pll.c index 8c1a2045d990..41aa4dc0fdae 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_clk_pll.c +++ b/sys/arm64/nvidia/tegra210/tegra210_clk_pll.c @@ -1,1503 +1,1503 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include "tegra210_car.h" #if 0 #define dprintf(...) printf(__VA_ARGS__) #else #define dprintf(...) #endif /* All PLLs. */ enum pll_type { PLL_M, PLL_MB, PLL_X, PLL_C, PLL_C2, PLL_C3, PLL_C4, PLL_P, PLL_A, PLL_A1, PLL_U, PLL_D, PLL_D2, PLL_DP, PLL_E, PLL_REFE}; /* Flags for PLLs */ #define PLL_FLAG_PDIV_POWER2 0x01 /* P Divider is 2^n */ #define PLL_FLAG_VCO_OUT 0x02 /* Output VCO directly */ #define PLL_FLAG_HAVE_SDM 0x04 /* Have SDM implemented */ #define PLL_FLAG_HAVE_SDA 0x04 /* Have SDA implemented */ /* Common base register bits. 
*/ #define PLL_BASE_BYPASS (1U << 31) #define PLL_BASE_ENABLE (1 << 30) #define PLL_BASE_REFDISABLE (1 << 29) #define PLL_BASE_LOCK (1 << 27) #define PLLREFE_MISC_LOCK (1 << 27) #define PLL_MISC_LOCK_ENABLE (1 << 18) #define PLLM_LOCK_ENABLE (1 << 4) #define PLLMB_LOCK_ENABLE (1 << 16) #define PLLC_LOCK_ENABLE (1 << 24) #define PLLC4_LOCK_ENABLE (1 << 30) #define PLLA_LOCK_ENABLE (1 << 28) #define PLLD2_LOCK_ENABLE (1 << 30) #define PLLU_LOCK_ENABLE (1 << 29) #define PLLREFE_LOCK_ENABLE (1 << 30) #define PLLPD_LOCK_ENABLE (1 << 30) #define PLLE_LOCK_ENABLE (1 << 9) #define PLLM_IDDQ_BIT 5 #define PLLMB_IDDQ_BIT 17 #define PLLC_IDDQ_BIT 27 #define PLLC4_IDDQ_BIT 18 #define PLLP_IDDQ_BIT 3 #define PLLA_IDDQ_BIT 25 #define PLLA1_IDDQ_BIT 27 #define PLLU_IDDQ_BIT 31 #define PLLD_IDDQ_BIT 20 #define PLLD2_IDDQ_BIT 18 #define PLLX_IDDQ_BIT 3 #define PLLREFE_IDDQ_BIT 24 #define PLLDP_IDDQ_BIT 18 #define PLL_LOCK_TIMEOUT 5000 /* Post divider <-> register value mapping. */ struct pdiv_table { uint32_t divider; /* real divider */ uint32_t value; /* register value */ }; /* Bits definition of M, N and P fields. */ struct mnp_bits { uint32_t m_width; uint32_t n_width; uint32_t p_width; uint32_t m_shift; uint32_t n_shift; uint32_t p_shift; }; struct clk_pll_def { struct clknode_init_def clkdef; enum pll_type type; uint32_t base_reg; uint32_t misc_reg; uint32_t lock_enable; uint32_t iddq_reg; uint32_t iddq_mask; uint32_t flags; struct pdiv_table *pdiv_table; struct mnp_bits mnp_bits; }; #define PLIST(x) static const char *x[] #define PLL(_id, cname, pname) \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS /* multiplexer for pll sources. 
*/ #define MUX(_id, cname, plists, o, s, w) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = plists, \ .clkdef.parent_cnt = nitems(plists), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .width = w, \ } /* Fractional divider (7.1) for PLL branch. */ #define DIV7_1(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = (s) + 1, \ .i_width = 7, \ .f_shift = s, \ .f_width = 1, \ } /* P divider (2^n). for PLL branch. */ #define DIV5_E(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .i_shift = s, \ .i_width = 5, \ } /* P divider (2^n). for PLL branch. */ #define DIV_TB(_id, cname, plist, o, s, n, table) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .div_flags = CLK_DIV_WITH_TABLE | CLK_DIV_ZERO_BASED, \ .offset = o, \ .i_shift = s, \ .i_width = n, \ .div_table = table, \ } /* Standard gate. */ #define GATE(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 1, \ .on_value = 1, \ .off_value = 0, \ } /* Gate for PLL branch. */ #define GATE_PLL(_id, cname, plist, o, s) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){plist}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = o, \ .shift = s, \ .mask = 3, \ .on_value = 3, \ .off_value = 0, \ } /* Fixed rate multipier/divider. 
*/ #define FACT(_id, cname, pname, _mult, _div) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = (const char *[]){pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .mult = _mult, \ .div = _div, \ } static struct pdiv_table qlin_map[] = { { 1, 0}, { 2, 1}, { 3, 2}, { 4, 3}, { 5, 4}, { 6, 5}, { 8, 6}, { 9, 7}, {10, 8}, {12, 9}, {15, 10}, {16, 11}, {18, 12}, {20, 13}, {24, 14}, {30, 15}, {32, 16}, { 0, 0}, }; static struct clk_pll_def pll_clks[] = { /* PLLM: 880 MHz Clock source for EMC 2x clock */ { PLL(TEGRA210_CLK_PLL_M, "pllM_out0", "osc"), .type = PLL_M, .base_reg = PLLM_BASE, .misc_reg = PLLM_MISC2, .lock_enable = PLLM_LOCK_ENABLE, .iddq_reg = PLLM_MISC2, .iddq_mask = 1 << PLLM_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 20}, }, /* PLLMB: 880 MHz Clock source for EMC 2x clock */ { PLL(TEGRA210_CLK_PLL_M, "pllMB_out0", "osc"), .type = PLL_MB, .base_reg = PLLMB_BASE, .misc_reg = PLLMB_MISC1, .lock_enable = PLLMB_LOCK_ENABLE, .iddq_reg = PLLMB_MISC1, .iddq_mask = 1 << PLLMB_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 20}, }, /* PLLX: 1GHz Clock source for the fast CPU cluster and the shadow CPU */ { PLL(TEGRA210_CLK_PLL_X, "pllX_out0", "osc_div_clk"), .type = PLL_X, .base_reg = PLLX_BASE, .misc_reg = PLLX_MISC, .lock_enable = PLL_MISC_LOCK_ENABLE, .iddq_reg = PLLX_MISC_3, .iddq_mask = 1 << PLLX_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 20}, }, /* PLLC: 510 MHz Clock source for camera use */ { PLL(TEGRA210_CLK_PLL_C, "pllC_out0", "osc_div_clk"), .type = PLL_C, .base_reg = PLLC_BASE, .misc_reg = PLLC_MISC_0, .iddq_reg = PLLC_MISC_1, .iddq_mask = 1 << PLLC_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 10, 20}, }, /* PLLC2: 510 MHz Clock source for SE, VIC, TSECB, NVJPG scaling */ { PLL(TEGRA210_CLK_PLL_C2, "pllC2_out0", "osc_div_clk"), .type = PLL_C2, .base_reg = PLLC2_BASE, .misc_reg = PLLC2_MISC_0, .iddq_reg = PLLC2_MISC_1, .iddq_mask = 1 << 
PLLC_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 10, 20}, }, /* PLLC3: 510 MHz Clock source for NVENC, NVDEC scaling */ { PLL(TEGRA210_CLK_PLL_C3, "pllC3_out0", "osc_div_clk"), .type = PLL_C3, .base_reg = PLLC3_BASE, .misc_reg = PLLC3_MISC_0, .lock_enable = PLL_MISC_LOCK_ENABLE, .iddq_reg = PLLC3_MISC_1, .iddq_mask = 1 << PLLC_IDDQ_BIT, .mnp_bits = {8, 8, 5, 0, 10, 20}, }, /* PLLC4: 600 MHz Clock source for SD/eMMC ans system busses */ { PLL(TEGRA210_CLK_PLL_C4, "pllC4", "pllC4_src"), .type = PLL_C4, .flags = PLL_FLAG_VCO_OUT, .base_reg = PLLC4_BASE, .misc_reg = PLLC4_MISC, .lock_enable = PLLC4_LOCK_ENABLE, .iddq_reg = PLLC4_BASE, .iddq_mask = 1 << PLLC4_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 19}, }, /* PLLP: 408 MHz Clock source for most peripherals */ { /* * VCO is directly exposed as pllP_out0, P div is used for * pllP_out2 */ PLL(TEGRA210_CLK_PLL_P, "pllP_out0", "osc_div_clk"), .type = PLL_P, .flags = PLL_FLAG_VCO_OUT, .base_reg = PLLP_BASE, .misc_reg = PLLP_MISC, .lock_enable = PLL_MISC_LOCK_ENABLE, .iddq_reg = PLLP_MISC, .iddq_mask = 1 << PLLA_IDDQ_BIT, .mnp_bits = {8, 8, 5, 0, 10, 20}, }, /* PLLA: Audio clock for precise codec sampling */ { PLL(TEGRA210_CLK_PLL_A, "pllA", "osc_div_clk"), .type = PLL_A, .base_reg = PLLA_BASE, .misc_reg = PLLA_MISC, .lock_enable = PLLA_LOCK_ENABLE, .iddq_reg = PLLA_BASE, .iddq_mask = 1 << PLLA_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 20}, }, /* PLLA1: Audio clock for ADSP */ { PLL(TEGRA210_CLK_PLL_A1, "pllA1_out0", "osc_div_clk"), .type = PLL_A1, .base_reg = PLLA1_BASE, .misc_reg = PLLA1_MISC_1, .iddq_reg = PLLA1_MISC_1, .iddq_mask = 1 << PLLA_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 20}, }, /* PLLU: 480 MHz Clock source for USB PHY, provides 12/60/480 MHz */ { PLL(TEGRA210_CLK_PLL_U, "pllU", "osc_div_clk"), .type = PLL_U, .flags = PLL_FLAG_VCO_OUT | PLL_FLAG_HAVE_SDA, .base_reg = PLLU_BASE, .misc_reg = PLLU_MISC, .lock_enable = PLLU_LOCK_ENABLE, 
.iddq_reg = PLLU_MISC, .iddq_mask = 1 << PLLU_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 16}, }, /* PLLD: 594 MHz Clock sources for the DSI and display subsystem */ { PLL(TEGRA210_CLK_PLL_D, "pllD_out", "osc_div_clk"), .type = PLL_D, .flags = PLL_FLAG_PDIV_POWER2, .base_reg = PLLD_BASE, .misc_reg = PLLD_MISC, .lock_enable = PLL_MISC_LOCK_ENABLE, .iddq_reg = PLLA1_MISC_1, .iddq_mask = 1 << PLLA_IDDQ_BIT, .mnp_bits = {8, 8, 3, 0, 11, 20}, }, /* PLLD2: 594 MHz Clock sources for the DSI and display subsystem */ { PLL(TEGRA210_CLK_PLL_D2, "pllD2_out", "pllD2_src"), .type = PLL_D2, .flags = PLL_FLAG_HAVE_SDM, .base_reg = PLLD2_BASE, .misc_reg = PLLD2_MISC, .lock_enable = PLLD2_LOCK_ENABLE, .iddq_reg = PLLD2_BASE, .iddq_mask = 1 << PLLD_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 19}, }, /* PLLREFE: 624 Mhz*/ { PLL(0, "pllREFE", "osc_div_clk"), .type = PLL_REFE, .flags = PLL_FLAG_VCO_OUT, .base_reg = PLLREFE_BASE, .misc_reg = PLLREFE_MISC, .lock_enable = PLLREFE_LOCK_ENABLE, .iddq_reg = PLLREFE_MISC, .iddq_mask = 1 << PLLREFE_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 16}, }, /* PLLE: 100 MHz reference clock for PCIe/SATA/USB 3.0 (spread spectrum) */ { PLL(TEGRA210_CLK_PLL_E, "pllE_out0", "pllE_src"), .type = PLL_E, .base_reg = PLLE_BASE, .misc_reg = PLLE_MISC, .lock_enable = PLLE_LOCK_ENABLE, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 24}, }, /* PLLDP: 270 MHz Clock source fordisplay SOR (spread spectrum) */ { PLL(0, "pllDP_out0", "pllDP_src"), .type = PLL_DP, .flags = PLL_FLAG_HAVE_SDM, .base_reg = PLLDP_BASE, .misc_reg = PLLDP_MISC, .lock_enable = PLLPD_LOCK_ENABLE, .iddq_reg = PLLDP_BASE, .iddq_mask = 1 << PLLDP_IDDQ_BIT, .pdiv_table = qlin_map, .mnp_bits = {8, 8, 5, 0, 8, 19}, }, }; /* Fixed rate dividers. 
*/ static struct clk_fixed_def tegra210_pll_fdivs[] = { FACT(0, "pllP_UD", "pllP_out0", 1, 1), FACT(0, "pllC_UD", "pllC_out0", 1, 1), FACT(0, "pllD_UD", "pllD_out0", 1, 1), FACT(0, "pllM_UD", "pllM_out0", 1, 1), FACT(0, "pllMB_UD", "pllMB_out0", 1, 1), FACT(TEGRA210_CLK_PLL_D_OUT0, "pllD_out0", "pllD_out", 1, 2), FACT(0, "pllC4_out1", "pllC4", 1, 3), FACT(0, "pllC4_out2", "pllC4", 1, 5), FACT(0, "pllD2_out0", "pllD2_out", 1, 2), /* Aliases used in super mux. */ FACT(0, "pllX_out0_alias", "pllX_out0", 1, 1), FACT(0, "dfllCPU_out_alias", "dfllCPU_out", 1, 1), }; /* MUXes for PLL sources. */ PLIST(mux_pll_srcs) = {"osc_div_clk", NULL, "pllP_out0", NULL}; /* FIXME */ PLIST(mux_plle_src1) = {"osc_div_clk", "pllP_out0"}; PLIST(mux_plle_src) = {"pllE_src1", "pllREFE_out0"}; static struct clk_mux_def tegra210_pll_sources[] = { /* Core clocks. */ MUX(0, "pllD2_src", mux_pll_srcs, PLLD2_BASE, 25, 2), MUX(0, "pllDP_src", mux_pll_srcs, PLLDP_BASE, 25, 2), MUX(0, "pllC4_src", mux_pll_srcs, PLLC4_BASE, 25, 2), MUX(0, "pllE_src1", mux_plle_src1, PLLE_AUX, 2, 1), MUX(0, "pllE_src", mux_plle_src, PLLE_AUX, 28, 1), }; /* Gates for PLL branches. */ static struct clk_gate_def tegra210_pll_gates[] = { /* Core clocks. 
*/ GATE_PLL(0, "pllC_out1", "pllC_out1_div", PLLC_OUT, 0), GATE_PLL(0, "pllP_out1", "pllP_out1_div", PLLP_OUTA, 0), GATE_PLL(0, "pllP_out3", "pllP_out3_div", PLLP_OUTB, 0), GATE_PLL(TEGRA210_CLK_PLL_P_OUT4, "pllP_out4", "pllP_out4_div", PLLP_OUTB, 16), GATE_PLL(0, "pllP_out5", "pllP_out5_div", PLLP_OUTC, 16), GATE_PLL(0, "pllU_out1", "pllU_out1_div", PLLU_OUTA, 0), GATE_PLL(0, "pllU_out2", "pllU_out2_div", PLLU_OUTA, 16), GATE(0, "pllU_480", "pllU", PLLU_BASE, 22), GATE(0, "pllU_60", "pllU_out2", PLLU_BASE, 23), GATE(0, "pllU_48", "pllU_out1", PLLU_BASE, 25), GATE_PLL(0, "pllREFE_out1", "pllREFE_out1_div", PLLREFE_OUT, 0), GATE_PLL(0, "pllC4_out3", "pllC4_out3_div", PLLC4_OUT, 0), GATE_PLL(0, "pllA_out0", "pllA_out0_div", PLLA_OUT, 0), }; struct clk_div_table tegra210_pll_pdiv_tbl[] = { /* value , divider */ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 }, { 4, 5 }, { 5, 6 }, { 6, 8 }, { 7, 10 }, { 8, 12 }, { 9, 16 }, {10, 12 }, {11, 16 }, {12, 20 }, {13, 24 }, {14, 32 }, { 0, 0 }, }; /* Dividers for PLL branches. */ static struct clk_div_def tegra210_pll_divs[] = { /* Core clocks. 
*/ DIV7_1(0, "pllC_out1_div", "pllC_out0", PLLC_OUT, 8), DIV7_1(0, "pllP_out1_div", "pllP_out0", PLLP_OUTA, 8), DIV_TB(0, "pllP_out2", "pllP_out0", PLLP_BASE, 20, 5, tegra210_pll_pdiv_tbl), DIV7_1(0, "pllP_out3_div", "pllP_out0", PLLP_OUTB, 8), DIV7_1(0, "pllP_out4_div", "pllP_out0", PLLP_OUTB, 24), DIV7_1(0, "pllP_out5_div", "pllP_out0", PLLP_OUTC, 24), DIV_TB(0, "pllU_out0", "pllU", PLLU_BASE, 16, 5, tegra210_pll_pdiv_tbl), DIV7_1(0, "pllU_out1_div", "pllU_out0", PLLU_OUTA, 8), DIV7_1(0, "pllU_out2_div", "pllU_out0", PLLU_OUTA, 24), DIV_TB(0, "pllREFE_out0", "pllREFE", PLLREFE_BASE, 16, 5, tegra210_pll_pdiv_tbl), DIV7_1(0, "pllREFE_out1_div", "pllREFE", PLLREFE_OUT, 8), DIV_TB(TEGRA210_CLK_PLL_C4_OUT0, "pllC4_out0", "pllC4", PLLC4_BASE, 19, 5, tegra210_pll_pdiv_tbl), DIV7_1(0, "pllC4_out3_div", "pllC4_out0", PLLC4_OUT, 8), DIV7_1(0, "pllA_out0_div", "pllA", PLLA_OUT, 8), }; static int tegra210_pll_init(struct clknode *clk, device_t dev); static int tegra210_pll_set_gate(struct clknode *clk, bool enable); static int tegra210_pll_get_gate(struct clknode *clk, bool *enabled); static int tegra210_pll_recalc(struct clknode *clk, uint64_t *freq); static int tegra210_pll_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flags, int *stop); struct pll_sc { device_t clkdev; enum pll_type type; uint32_t base_reg; uint32_t misc_reg; uint32_t lock_enable; uint32_t iddq_reg; uint32_t iddq_mask; uint32_t flags; struct pdiv_table *pdiv_table; struct mnp_bits mnp_bits; }; static clknode_method_t tegra210_pll_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, tegra210_pll_init), CLKNODEMETHOD(clknode_set_gate, tegra210_pll_set_gate), CLKNODEMETHOD(clknode_get_gate, tegra210_pll_get_gate), CLKNODEMETHOD(clknode_recalc_freq, tegra210_pll_recalc), CLKNODEMETHOD(clknode_set_freq, tegra210_pll_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra210_pll, tegra210_pll_class, tegra210_pll_methods, sizeof(struct pll_sc), clknode_class); static int 
pll_enable(struct pll_sc *sc) { uint32_t reg; RD4(sc, sc->base_reg, ®); if (sc->type != PLL_E) reg &= ~PLL_BASE_BYPASS; reg |= PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (0); } static int pll_disable(struct pll_sc *sc) { uint32_t reg; RD4(sc, sc->base_reg, ®); if (sc->type != PLL_E) reg |= PLL_BASE_BYPASS; reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (0); } static uint32_t pdiv_to_reg(struct pll_sc *sc, uint32_t p_div) { struct pdiv_table *tbl; tbl = sc->pdiv_table; if (tbl == NULL) { if (sc->flags & PLL_FLAG_PDIV_POWER2) return (ffs(p_div) - 1); else return (p_div); } while (tbl->divider != 0) { if (p_div <= tbl->divider) return (tbl->value); tbl++; } return (0xFFFFFFFF); } static uint32_t reg_to_pdiv(struct pll_sc *sc, uint32_t reg) { struct pdiv_table *tbl; tbl = sc->pdiv_table; if (tbl == NULL) { if (sc->flags & PLL_FLAG_PDIV_POWER2) return (1 << reg); else return (reg == 0 ? 1: reg); } while (tbl->divider) { if (reg == tbl->value) return (tbl->divider); tbl++; } return (0); } static uint32_t get_masked(uint32_t val, uint32_t shift, uint32_t width) { return ((val >> shift) & ((1 << width) - 1)); } static uint32_t set_masked(uint32_t val, uint32_t v, uint32_t shift, uint32_t width) { val &= ~(((1 << width) - 1) << shift); val |= (v & ((1 << width) - 1)) << shift; return (val); } static void get_divisors(struct pll_sc *sc, uint32_t *m, uint32_t *n, uint32_t *p) { uint32_t val; struct mnp_bits *mnp_bits; mnp_bits = &sc->mnp_bits; RD4(sc, sc->base_reg, &val); *m = get_masked(val, mnp_bits->m_shift, mnp_bits->m_width); *n = get_masked(val, mnp_bits->n_shift, mnp_bits->n_width); *p = get_masked(val, mnp_bits->p_shift, mnp_bits->p_width); } static uint32_t set_divisors(struct pll_sc *sc, uint32_t val, uint32_t m, uint32_t n, uint32_t p) { struct mnp_bits *mnp_bits; mnp_bits = &sc->mnp_bits; val = set_masked(val, m, mnp_bits->m_shift, mnp_bits->m_width); val = set_masked(val, n, mnp_bits->n_shift, mnp_bits->n_width); val = set_masked(val, p, 
mnp_bits->p_shift, mnp_bits->p_width); return (val); } static bool is_locked(struct pll_sc *sc) { uint32_t reg; switch (sc->type) { case PLL_REFE: RD4(sc, sc->misc_reg, ®); reg &= PLLREFE_MISC_LOCK; break; case PLL_E: RD4(sc, sc->misc_reg, ®); reg &= PLLE_MISC_LOCK; break; default: RD4(sc, sc->base_reg, ®); reg &= PLL_BASE_LOCK; break; } return (reg != 0); } static int wait_for_lock(struct pll_sc *sc) { int i; for (i = PLL_LOCK_TIMEOUT / 10; i > 0; i--) { if (is_locked(sc)) break; DELAY(10); } if (i <= 0) { printf("PLL lock timeout\n"); return (ETIMEDOUT); } return (0); } static int plle_enable(struct pll_sc *sc) { uint32_t reg; int rv; uint32_t pll_m = 2; uint32_t pll_n = 125; uint32_t pll_cml = 14; /* Disable lock override. */ RD4(sc, sc->base_reg, ®); reg &= ~PLLE_BASE_LOCK_OVERRIDE; WR4(sc, sc->base_reg, reg); /* Enable SW control */ RD4(sc, PLLE_AUX, ®); reg |= PLLE_AUX_ENABLE_SWCTL; reg &= ~PLLE_AUX_SEQ_ENABLE; WR4(sc, PLLE_AUX, reg); DELAY(10); RD4(sc, sc->misc_reg, ®); reg |= PLLE_MISC_LOCK_ENABLE; reg |= PLLE_MISC_IDDQ_SWCTL; reg &= ~PLLE_MISC_IDDQ_OVERRIDE_VALUE; reg |= PLLE_MISC_PTS; reg &= ~PLLE_MISC_VREG_BG_CTRL(~0); reg &= ~PLLE_MISC_VREG_CTRL(~0); WR4(sc, sc->misc_reg, reg); DELAY(10); RD4(sc, PLLE_SS_CNTL, ®); reg |= PLLE_SS_CNTL_DISABLE; WR4(sc, PLLE_SS_CNTL, reg); RD4(sc, sc->base_reg, ®); reg = set_divisors(sc, reg, pll_m, pll_n, pll_cml); WR4(sc, sc->base_reg, reg); DELAY(10); pll_enable(sc); rv = wait_for_lock(sc); if (rv != 0) return (rv); RD4(sc, PLLE_SS_CNTL, ®); reg &= ~PLLE_SS_CNTL_SSCINCINTRV(~0); reg &= ~PLLE_SS_CNTL_SSCINC(~0); reg &= ~PLLE_SS_CNTL_SSCINVERT; reg &= ~PLLE_SS_CNTL_SSCCENTER; reg &= ~PLLE_SS_CNTL_SSCMAX(~0); reg |= PLLE_SS_CNTL_SSCINCINTRV(0x23); reg |= PLLE_SS_CNTL_SSCINC(0x1); reg |= PLLE_SS_CNTL_SSCMAX(0x21); WR4(sc, PLLE_SS_CNTL, reg); reg &= ~PLLE_SS_CNTL_SSCBYP; reg &= ~PLLE_SS_CNTL_BYPASS_SS; WR4(sc, PLLE_SS_CNTL, reg); DELAY(10); reg &= ~PLLE_SS_CNTL_INTERP_RESET; WR4(sc, PLLE_SS_CNTL, reg); DELAY(10); /* HW 
control of brick pll. */ RD4(sc, sc->misc_reg, ®); reg &= ~PLLE_MISC_IDDQ_SWCTL; WR4(sc, sc->misc_reg, reg); RD4(sc, PLLE_AUX, ®); reg |= PLLE_AUX_USE_LOCKDET; reg |= PLLE_AUX_SS_SEQ_INCLUDE; reg &= ~PLLE_AUX_ENABLE_SWCTL; reg &= ~PLLE_AUX_SS_SWCTL; WR4(sc, PLLE_AUX, reg); reg |= PLLE_AUX_SEQ_START_STATE; DELAY(10); reg |= PLLE_AUX_SEQ_ENABLE; WR4(sc, PLLE_AUX, reg); /* Enable and start XUSBIO PLL HW control*/ RD4(sc, XUSBIO_PLL_CFG0, ®); reg &= ~XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL; reg &= ~XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL; reg |= XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET; reg |= XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ; reg &= ~XUSBIO_PLL_CFG0_SEQ_ENABLE; WR4(sc, XUSBIO_PLL_CFG0, reg); DELAY(10); reg |= XUSBIO_PLL_CFG0_SEQ_ENABLE; WR4(sc, XUSBIO_PLL_CFG0, reg); /* Enable and start SATA PLL HW control */ RD4(sc, SATA_PLL_CFG0, ®); reg &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL; reg &= ~SATA_PLL_CFG0_PADPLL_RESET_OVERRIDE_VALUE; reg |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET; reg |= SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ; reg &= ~SATA_PLL_CFG0_SEQ_IN_SWCTL; reg &= ~SATA_PLL_CFG0_SEQ_RESET_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_LANE_PD_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_PADPLL_PD_INPUT_VALUE; reg &= ~SATA_PLL_CFG0_SEQ_ENABLE; WR4(sc, SATA_PLL_CFG0, reg); DELAY(10); reg |= SATA_PLL_CFG0_SEQ_ENABLE; WR4(sc, SATA_PLL_CFG0, reg); /* Enable HW control of PCIe PLL. */ RD4(sc, PCIE_PLL_CFG, ®); reg |= PCIE_PLL_CFG_SEQ_ENABLE; WR4(sc, PCIE_PLL_CFG, reg); return (0); } static int tegra210_pll_set_gate(struct clknode *clknode, bool enable) { int rv; struct pll_sc *sc; sc = clknode_get_softc(clknode); if (enable == 0) { rv = pll_disable(sc); return(rv); } if (sc->type == PLL_E) rv = plle_enable(sc); else rv = pll_enable(sc); return (rv); } static int tegra210_pll_get_gate(struct clknode *clknode, bool *enabled) { uint32_t reg; struct pll_sc *sc; sc = clknode_get_softc(clknode); RD4(sc, sc->base_reg, ®); *enabled = reg & PLL_BASE_ENABLE ? 
true: false; WR4(sc, sc->base_reg, reg); return (0); } static int pll_set_std(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags, uint32_t m, uint32_t n, uint32_t p) { uint32_t reg; struct mnp_bits *mnp_bits; int rv; mnp_bits = &sc->mnp_bits; if (m >= (1 << mnp_bits->m_width)) return (ERANGE); if (n >= (1 << mnp_bits->n_width)) return (ERANGE); if (pdiv_to_reg(sc, p) >= (1 << mnp_bits->p_width)) return (ERANGE); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (((fin / m) * n) /p))) return (ERANGE); *fout = ((fin / m) * n) /p; return (0); } pll_disable(sc); /* take pll out of IDDQ */ if (sc->iddq_reg != 0) MD4(sc, sc->iddq_reg, sc->iddq_mask, 0); RD4(sc, sc->base_reg, ®); reg = set_masked(reg, m, mnp_bits->m_shift, mnp_bits->m_width); reg = set_masked(reg, n, mnp_bits->n_shift, mnp_bits->n_width); reg = set_masked(reg, pdiv_to_reg(sc, p), mnp_bits->p_shift, mnp_bits->p_width); WR4(sc, sc->base_reg, reg); /* Enable PLL. */ RD4(sc, sc->base_reg, ®); reg |= PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); /* Enable lock detection. 
*/ RD4(sc, sc->misc_reg, ®); reg |= sc->lock_enable; WR4(sc, sc->misc_reg, reg); rv = wait_for_lock(sc); if (rv != 0) { /* Disable PLL */ RD4(sc, sc->base_reg, ®); reg &= ~PLL_BASE_ENABLE; WR4(sc, sc->base_reg, reg); return (rv); } RD4(sc, sc->misc_reg, ®); pll_enable(sc); *fout = ((fin / m) * n) / p; return 0; } static int plla_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 1; m = 3; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std(sc, fin, fout, flags, m, n, p)); } static int pllc_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 2; m = 3; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std( sc, fin, fout, flags, m, n, p)); } static int pllc4_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 1; m = 4; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std( sc, fin, fout, flags, m, n, p)); } static int plldp_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; p = 1; m = 4; n = (*fout * p * m + fin / 2)/ fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std( sc, fin, fout, flags, m, n, p)); } /* * PLLD2 is used as source for pixel clock for HDMI. * We must be able to set it frequency very flexibly and * precisely (within 5% tolerance limit allowed by HDMI specs). * * For this reason, it is necessary to search the full state space. * Fortunately, thanks to early cycle terminations, performance * is within acceptable limits. 
*/ #define PLLD2_PFD_MIN 12000000 /* 12 MHz */ #define PLLD2_PFD_MAX 38400000 /* 38.4 MHz */ #define PLLD2_VCO_MIN 750000000 /* 750 MHz */ #define PLLD2_VCO_MAX 1500000000 /* 1.5 GHz */ static int plld2_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; uint32_t best_m, best_n, best_p; uint64_t vco, pfd; int64_t err, best_err; struct mnp_bits *mnp_bits; struct pdiv_table *tbl; int p_idx, rv; mnp_bits = &sc->mnp_bits; tbl = sc->pdiv_table; best_err = INT64_MAX; for (p_idx = 0; tbl[p_idx].divider != 0; p_idx++) { p = tbl[p_idx].divider; /* Check constraints */ vco = *fout * p; if (vco < PLLD2_VCO_MIN) continue; if (vco > PLLD2_VCO_MAX) break; for (m = 1; m < (1 << mnp_bits->m_width); m++) { n = (*fout * p * m + fin / 2) / fin; /* Check constraints */ if (n == 0) continue; if (n >= (1 << mnp_bits->n_width)) break; vco = (fin * n) / m; if (vco > PLLD2_VCO_MAX || vco < PLLD2_VCO_MIN) continue; pfd = fin / m; if (pfd > PLLD2_PFD_MAX || vco < PLLD2_PFD_MIN) continue; /* Constraints passed, save best result */ err = *fout - vco / p; if (err < 0) err = -err; if (err < best_err) { best_err = err; best_p = p; best_m = m; best_n = n; } if (err == 0) goto done; } } done: /* * HDMI specification allows 5% pixel clock tolerance, * we will by a slightly stricter */ if (best_err > ((*fout * 100) / 4)) return (ERANGE); if (flags & CLK_SET_DRYRUN) return (0); rv = pll_set_std(sc, fin, fout, flags, best_m, best_n, best_p); /* XXXX Panic for rv == ERANGE ? 
*/ return (rv); } static int pllrefe_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { uint32_t m, n, p; m = 1; p = 1; n = *fout * p * m / fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); return (pll_set_std(sc, fin, fout, flags, m, n, p)); } #define PLLX_PFD_MIN 12000000LL /* 12 MHz */ #define PLLX_PFD_MAX 38400000LL /* 38.4 MHz */ #define PLLX_VCO_MIN 900000000LL /* 0.9 GHz */ #define PLLX_VCO_MAX 3000000000LL /* 3 GHz */ static int pllx_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags) { struct mnp_bits *mnp_bits; uint32_t m, n, p; uint32_t old_m, old_n, old_p; uint32_t reg; int i, rv; mnp_bits = &sc->mnp_bits; get_divisors(sc, &old_m, &old_n, &old_p); old_p = reg_to_pdiv(sc, old_p); /* Pre-divider is fixed, Compute post-divider */ m = old_m; p = 1; while ((*fout * p) < PLLX_VCO_MIN) p++; if ((*fout * p) > PLLX_VCO_MAX) return (ERANGE); n = (*fout * p * m + fin / 2) / fin; dprintf("%s: m: %d, n: %d, p: %d\n", __func__, m, n, p); if (m >= (1 << mnp_bits->m_width)) return (ERANGE); if (n >= (1 << mnp_bits->n_width)) return (ERANGE); if (pdiv_to_reg(sc, p) >= (1 << mnp_bits->p_width)) return (ERANGE); if (flags & CLK_SET_DRYRUN) { if (((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (((fin / m) * n) /p))) return (ERANGE); *fout = ((fin / m) * n) /p; return (0); } /* If new post-divider is bigger that original, set it now. */ if (p < old_p) { RD4(sc, sc->base_reg, ®); reg = set_masked(reg, pdiv_to_reg(sc, p), mnp_bits->p_shift, mnp_bits->p_width); WR4(sc, sc->base_reg, reg); } DELAY(100); /* vvv Program dynamic VCO ramp. vvv */ /* 1 - disable dynamic ramp mode. */ RD4(sc, PLLX_MISC_2, ®); reg &= ~PLLX_MISC_2_EN_DYNRAMP; WR4(sc, PLLX_MISC_2, reg); /* 2 - Setup new ndiv. */ RD4(sc, PLLX_MISC_2, ®); reg &= ~PLLX_MISC_2_NDIV_NEW(~0); reg |= PLLX_MISC_2_NDIV_NEW(n); WR4(sc, PLLX_MISC_2, reg); /* 3 - enable dynamic ramp. 
*/ RD4(sc, PLLX_MISC_2, ®); reg |= PLLX_MISC_2_EN_DYNRAMP; WR4(sc, PLLX_MISC_2, reg); /* 4 - wait for done. */ for (i = PLL_LOCK_TIMEOUT / 10; i > 0; i--) { RD4(sc, PLLX_MISC_2, ®); if (reg & PLLX_MISC_2_DYNRAMP_DONE) break; DELAY(10); } if (i <= 0) { printf("PLL X dynamic ramp timedout\n"); return (ETIMEDOUT); } /* 5 - copy new ndiv to base register. */ RD4(sc, sc->base_reg, ®); reg = set_masked(reg, n, mnp_bits->n_shift, mnp_bits->n_width); WR4(sc, sc->base_reg, reg); /* 6 - disable dynamic ramp mode. */ RD4(sc, PLLX_MISC_2, ®); reg &= ~PLLX_MISC_2_EN_DYNRAMP; WR4(sc, PLLX_MISC_2, reg); rv = wait_for_lock(sc); if (rv != 0) { printf("PLL X is not locked !!\n"); } /* ^^^ Dynamic ramp done. ^^^ */ /* If new post-divider is smaller that original, set it. */ if (p > old_p) { RD4(sc, sc->base_reg, ®); reg = set_masked(reg, pdiv_to_reg(sc, p), mnp_bits->p_shift, mnp_bits->p_width); WR4(sc, sc->base_reg, reg); } *fout = ((fin / m) * n) / p; return (0); } /* Simplified setup for 38.4 MHz clock. */ #define PLLX_STEP_A 0x04 #define PLLX_STEP_B 0x05 static int pllx_init(struct pll_sc *sc) { uint32_t reg; RD4(sc, PLLX_MISC, ®); reg = PLLX_MISC_LOCK_ENABLE; WR4(sc, PLLX_MISC, reg); /* Setup dynamic ramp. */ reg = 0; reg |= PLLX_MISC_2_DYNRAMP_STEPA(PLLX_STEP_A); reg |= PLLX_MISC_2_DYNRAMP_STEPB(PLLX_STEP_B); WR4(sc, PLLX_MISC_2, reg); /* Disable SDM. 
*/ reg = 0; WR4(sc, PLLX_MISC_4, reg); WR4(sc, PLLX_MISC_5, reg); return (0); } static int tegra210_pll_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flags, int *stop) { *stop = 1; int rv; struct pll_sc *sc; sc = clknode_get_softc(clknode); dprintf("%s: %s requested freq: %lu, input freq: %lu\n", __func__, clknode_get_name(clknode), *fout, fin); switch (sc->type) { case PLL_A: rv = plla_set_freq(sc, fin, fout, flags); break; case PLL_C: case PLL_C2: case PLL_C3: rv = pllc_set_freq(sc, fin, fout, flags); break; case PLL_C4: rv = pllc4_set_freq(sc, fin, fout, flags); break; case PLL_D2: rv = plld2_set_freq(sc, fin, fout, flags); break; case PLL_DP: rv = plldp_set_freq(sc, fin, fout, flags); break; case PLL_REFE: rv = pllrefe_set_freq(sc, fin, fout, flags); break; case PLL_X: rv = pllx_set_freq(sc, fin, fout, flags); break; case PLL_U: if (*fout == 480000000) /* PLLU is fixed to 480 MHz */ rv = 0; else rv = ERANGE; break; default: rv = ENXIO; break; } return (rv); } static int tegra210_pll_init(struct clknode *clk, device_t dev) { struct pll_sc *sc; uint32_t reg, rv; sc = clknode_get_softc(clk); if (sc->type == PLL_X) { rv = pllx_init(sc); if (rv != 0) return (rv); } /* If PLL is enabled, enable lock detect too. 
*/ RD4(sc, sc->base_reg, ®); if (reg & PLL_BASE_ENABLE) { RD4(sc, sc->misc_reg, ®); reg |= sc->lock_enable; WR4(sc, sc->misc_reg, reg); } if (sc->type == PLL_REFE) { RD4(sc, sc->misc_reg, ®); reg &= ~(1 << 29); /* Disable lock override */ WR4(sc, sc->misc_reg, reg); } clknode_init_parent_idx(clk, 0); return(0); } static int tegra210_pll_recalc(struct clknode *clk, uint64_t *freq) { struct pll_sc *sc; uint32_t m, n, p, pr; uint32_t reg, misc_reg; int locked; sc = clknode_get_softc(clk); RD4(sc, sc->base_reg, ®); RD4(sc, sc->misc_reg, &misc_reg); get_divisors(sc, &m, &n, &pr); /* If VCO is directlu exposed, P divider is handled by external node */ if (sc->flags & PLL_FLAG_VCO_OUT) p = 1; else p = reg_to_pdiv(sc, pr); locked = is_locked(sc); dprintf("%s: %s (0x%08x, 0x%08x) - m: %d, n: %d, p: %d (%d): " "e: %d, r: %d, o: %d - %s\n", __func__, clknode_get_name(clk), reg, misc_reg, m, n, p, pr, (reg >> 30) & 1, (reg >> 29) & 1, (reg >> 28) & 1, locked ? "locked" : "unlocked"); if ((m == 0) || (n == 0) || (p == 0)) { *freq = 0; return (EINVAL); } if (!locked) { *freq = 0; return (0); } *freq = ((*freq / m) * n) / p; return (0); } static int pll_register(struct clkdom *clkdom, struct clk_pll_def *clkdef) { struct clknode *clk; struct pll_sc *sc; clk = clknode_create(clkdom, &tegra210_pll_class, &clkdef->clkdef); if (clk == NULL) return (ENXIO); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->type = clkdef->type; sc->base_reg = clkdef->base_reg; sc->misc_reg = clkdef->misc_reg; sc->lock_enable = clkdef->lock_enable; sc->iddq_reg = clkdef->iddq_reg; sc->iddq_mask = clkdef->iddq_mask; sc->flags = clkdef->flags; sc->pdiv_table = clkdef->pdiv_table; sc->mnp_bits = clkdef->mnp_bits; clknode_register(clkdom, clk); return (0); } static void config_utmi_pll(struct tegra210_car_softc *sc) { uint32_t reg; /* * XXX Simplified UTMIP settings for 38.4MHz base clock. 
*/ #define ENABLE_DELAY_COUNT 0x00 #define STABLE_COUNT 0x00 #define ACTIVE_DELAY_COUNT 0x06 #define XTAL_FREQ_COUNT 0x80 CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG2, ®); reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0); reg |= UTMIP_PLL_CFG2_STABLE_COUNT(STABLE_COUNT); reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(ACTIVE_DELAY_COUNT); CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG2, reg); CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG1, ®); reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0); reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(ENABLE_DELAY_COUNT); reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0); reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(XTAL_FREQ_COUNT); reg |= UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG1, reg); reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG1, reg); DELAY(20); /* Setup samplers. */ CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG2, ®); reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP; reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP; reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN; reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG2, reg); /* Powerup UTMIP. */ CLKDEV_READ_4(sc->dev, UTMIP_PLL_CFG1, ®); reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP; reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN; CLKDEV_WRITE_4(sc->dev, UTMIP_PLL_CFG1, reg); DELAY(10); /* Prepare UTMIP sequencer. 
*/ CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET; reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); DELAY(10); CLKDEV_READ_4(sc->dev, XUSB_PLL_CFG0, ®); reg &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY; CLKDEV_WRITE_4(sc->dev, XUSB_PLL_CFG0, reg); DELAY(10); /* HW control of UTMIPLL. */ CLKDEV_READ_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, ®); reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE; CLKDEV_WRITE_4(sc->dev, UTMIPLL_HW_PWRDN_CFG0, reg); } void tegra210_init_plls(struct tegra210_car_softc *sc) { int i, rv; for (i = 0; i < nitems(tegra210_pll_sources); i++) { rv = clknode_mux_register(sc->clkdom, tegra210_pll_sources + i); if (rv != 0) panic("clk_mux_register failed"); } for (i = 0; i < nitems(pll_clks); i++) { rv = pll_register(sc->clkdom, pll_clks + i); if (rv != 0) panic("pll_register failed"); } config_utmi_pll(sc); for (i = 0; i < nitems(tegra210_pll_fdivs); i++) { rv = clknode_fixed_register(sc->clkdom, tegra210_pll_fdivs + i); if (rv != 0) panic("clk_fixed_register failed"); } for (i = 0; i < nitems(tegra210_pll_gates); i++) { rv = clknode_gate_register(sc->clkdom, tegra210_pll_gates + i); if (rv != 0) panic("clk_gate_register failed"); } for (i = 0; i < nitems(tegra210_pll_divs); i++) { rv = clknode_div_register(sc->clkdom, tegra210_pll_divs + i); if (rv != 0) panic("clk_div_register failed"); } } diff --git a/sys/arm64/nvidia/tegra210/tegra210_clk_super.c b/sys/arm64/nvidia/tegra210/tegra210_clk_super.c index 7fd1b8ec7a7a..57a1d4c22d51 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_clk_super.c +++ b/sys/arm64/nvidia/tegra210/tegra210_clk_super.c @@ -1,228 +1,228 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include -#include +#include #include #include "tegra210_car.h" struct super_mux_def { struct clknode_init_def clkdef; uint32_t base_reg; uint32_t flags; }; #define PLIST(x) static const char *x[] #define SM(_id, cn, pl, r) \ { \ .clkdef.id = _id, \ .clkdef.name = cn, \ .clkdef.parent_names = pl, \ .clkdef.parent_cnt = nitems(pl), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_reg = r, \ } PLIST(cclk_g_parents) = { "clk_m", NULL, "clk_s", NULL, "pllP_out0", "pllP_out4", NULL, NULL, "pllX_out0", "dfllCPU_out_alias", NULL, NULL, NULL, NULL, "pllX_out0_alias", "dfllCPU_out", }; PLIST(cclk_lp_parents) = { "clk_m", NULL, "clk_s", NULL, "pllP_out0", "pllP_out4", NULL, NULL, "pllX_out0", "dfllCPU_out_alias", NULL, NULL, NULL, NULL, "pllX_out0_alias", "dfllCPU_out", }; PLIST(sclk_parents) = { "clk_m", "pllC_out1", "pllC4_out3", "pllP_out0", "pllP_out2", "pllC4_out1", "clk_s", "pllC4_out1", }; static struct super_mux_def super_mux_def[] = { SM(TEGRA210_CLK_CCLK_G, "cclk_g", cclk_g_parents, CCLKG_BURST_POLICY), SM(TEGRA210_CLK_CCLK_LP, "cclk_lp", cclk_lp_parents, CCLKLP_BURST_POLICY), SM(TEGRA210_CLK_SCLK, "sclk", sclk_parents, SCLK_BURST_POLICY), }; static int super_mux_init(struct clknode *clk, device_t dev); static int super_mux_set_mux(struct clknode *clk, int idx); struct super_mux_sc { device_t clkdev; uint32_t base_reg; uint32_t flags; int mux; }; static clknode_method_t super_mux_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, super_mux_init), CLKNODEMETHOD(clknode_set_mux, super_mux_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(tegra210_super_mux, tegra210_super_mux_class, super_mux_methods, sizeof(struct super_mux_sc), clknode_class); /* Mux status. */ #define SUPER_MUX_STATE_STDBY 0 #define SUPER_MUX_STATE_IDLE 1 #define SUPER_MUX_STATE_RUN 2 #define SUPER_MUX_STATE_IRQ 3 #define SUPER_MUX_STATE_FIQ 4 /* Mux register bits. 
*/ #define SUPER_MUX_STATE_BIT_SHIFT 28 #define SUPER_MUX_STATE_BIT_MASK 0xF /* State is Priority encoded */ #define SUPER_MUX_STATE_BIT_STDBY 0x00 #define SUPER_MUX_STATE_BIT_IDLE 0x01 #define SUPER_MUX_STATE_BIT_RUN 0x02 #define SUPER_MUX_STATE_BIT_IRQ 0x04 #define SUPER_MUX_STATE_BIT_FIQ 0x08 #define SUPER_MUX_MUX_WIDTH 4 static uint32_t super_mux_get_state(uint32_t reg) { reg = (reg >> SUPER_MUX_STATE_BIT_SHIFT) & SUPER_MUX_STATE_BIT_MASK; if (reg & SUPER_MUX_STATE_BIT_FIQ) return (SUPER_MUX_STATE_FIQ); if (reg & SUPER_MUX_STATE_BIT_IRQ) return (SUPER_MUX_STATE_IRQ); if (reg & SUPER_MUX_STATE_BIT_RUN) return (SUPER_MUX_STATE_RUN); if (reg & SUPER_MUX_STATE_BIT_IDLE) return (SUPER_MUX_STATE_IDLE); return (SUPER_MUX_STATE_STDBY); } static int super_mux_init(struct clknode *clk, device_t dev) { struct super_mux_sc *sc; uint32_t reg; int shift, state; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); DEVICE_UNLOCK(sc); state = super_mux_get_state(reg); if ((state != SUPER_MUX_STATE_RUN) && (state != SUPER_MUX_STATE_IDLE)) { panic("Unexpected super mux state: %u", state); } shift = state * SUPER_MUX_MUX_WIDTH; sc->mux = (reg >> shift) & ((1 << SUPER_MUX_MUX_WIDTH) - 1); clknode_init_parent_idx(clk, sc->mux); return(0); } static int super_mux_set_mux(struct clknode *clk, int idx) { struct super_mux_sc *sc; int shift, state; uint32_t reg, dummy; sc = clknode_get_softc(clk); DEVICE_LOCK(sc); RD4(sc, sc->base_reg, ®); state = super_mux_get_state(reg); if ((state != SUPER_MUX_STATE_RUN) && (state != SUPER_MUX_STATE_IDLE)) { panic("Unexpected super mux state: %u", state); } shift = (state - 1) * SUPER_MUX_MUX_WIDTH; sc->mux = idx; reg &= ~(((1 << SUPER_MUX_MUX_WIDTH) - 1) << shift); reg |= idx << shift; WR4(sc, sc->base_reg, reg); RD4(sc, sc->base_reg, &dummy); DEVICE_UNLOCK(sc); return(0); } static int super_mux_register(struct clkdom *clkdom, struct super_mux_def *clkdef) { struct clknode *clk; struct super_mux_sc *sc; clk = clknode_create(clkdom, 
&tegra210_super_mux_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clkdev = clknode_get_device(clk); sc->base_reg = clkdef->base_reg; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } void tegra210_super_mux_clock(struct tegra210_car_softc *sc) { int i, rv; for (i = 0; i < nitems(super_mux_def); i++) { rv = super_mux_register(sc->clkdom, &super_mux_def[i]); if (rv != 0) panic("super_mux_register failed"); } } diff --git a/sys/arm64/nvidia/tegra210/tegra210_coretemp.c b/sys/arm64/nvidia/tegra210/tegra210_coretemp.c index fc6e3b17de34..ac037d4ac385 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_coretemp.c +++ b/sys/arm64/nvidia/tegra210/tegra210_coretemp.c @@ -1,264 +1,264 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "tegra_soctherm_if.h" enum therm_info { CORETEMP_TEMP, CORETEMP_DELTA, CORETEMP_RESOLUTION, CORETEMP_TJMAX, }; struct tegra210_coretemp_softc { device_t dev; int overheat_log; int core_max_temp; int cpu_id; device_t tsens_dev; intptr_t tsens_id; }; static int coretemp_get_val_sysctl(SYSCTL_HANDLER_ARGS) { device_t dev; int val, temp, rv; struct tegra210_coretemp_softc *sc; enum therm_info type; char stemp[16]; dev = (device_t) arg1; sc = device_get_softc(dev); type = arg2; rv = TEGRA_SOCTHERM_GET_TEMPERATURE(sc->tsens_dev, sc->dev, sc->tsens_id, &temp); if (rv != 0) { device_printf(sc->dev, "Cannot read temperature sensor %u: %d\n", (unsigned int)sc->tsens_id, rv); return (rv); } switch (type) { case CORETEMP_TEMP: val = temp / 100; val += 2731; break; case CORETEMP_DELTA: val = (sc->core_max_temp - temp) / 1000; break; case CORETEMP_RESOLUTION: val = 1; break; case CORETEMP_TJMAX: val = sc->core_max_temp / 100; val += 2731; break; } if ((temp > sc->core_max_temp) && !sc->overheat_log) { sc->overheat_log = 1; /* * Check for Critical Temperature Status and Critical * Temperature Log. It doesn't really matter if the * current temperature is invalid because the "Critical * Temperature Log" bit will tell us if the Critical * Temperature has * been reached in past. It's not * directly related to the current temperature. 
* * If we reach a critical level, allow devctl(4) * to catch this and shutdown the system. */ device_printf(dev, "critical temperature detected, " "suggest system shutdown\n"); snprintf(stemp, sizeof(stemp), "%d", val); devctl_notify("coretemp", "Thermal", stemp, "notify=0xcc"); } else { sc->overheat_log = 0; } return (sysctl_handle_int(oidp, 0, val, req)); } static int tegra210_coretemp_ofw_parse(struct tegra210_coretemp_softc *sc) { int rv, ncells; phandle_t node, xnode; pcell_t *cells; node = OF_peer(0); node = ofw_bus_find_child(node, "thermal-zones"); if (node <= 0) { device_printf(sc->dev, "Cannot find 'thermal-zones'.\n"); return (ENXIO); } node = ofw_bus_find_child(node, "cpu"); if (node <= 0) { device_printf(sc->dev, "Cannot find 'cpu'\n"); return (ENXIO); } rv = ofw_bus_parse_xref_list_alloc(node, "thermal-sensors", "#thermal-sensor-cells", 0, &xnode, &ncells, &cells); if (rv != 0) { device_printf(sc->dev, "Cannot parse 'thermal-sensors' property.\n"); return (ENXIO); } if (ncells != 1) { device_printf(sc->dev, "Invalid format of 'thermal-sensors' property(%d).\n", ncells); return (ENXIO); } sc->tsens_id = 0x100 + sc->cpu_id; OF_prop_free(cells); sc->tsens_dev = OF_device_from_xref(xnode); if (sc->tsens_dev == NULL) { device_printf(sc->dev, "Cannot find thermal sensors device."); return (ENXIO); } return (0); } static void tegra210_coretemp_identify(driver_t *driver, device_t parent) { phandle_t root; root = OF_finddevice("/"); if (!ofw_bus_node_is_compatible(root, "nvidia,tegra210")) return; if (device_find_child(parent, "tegra210_coretemp", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "tegra210_coretemp", -1) == NULL) device_printf(parent, "add child failed\n"); } static int tegra210_coretemp_probe(device_t dev) { device_set_desc(dev, "CPU Thermal Sensor"); return (0); } static int tegra210_coretemp_attach(device_t dev) { struct tegra210_coretemp_softc *sc; device_t pdev; struct sysctl_oid *oid; struct sysctl_ctx_list *ctx; int rv; sc = 
device_get_softc(dev); sc->dev = dev; sc->cpu_id = device_get_unit(dev); sc->core_max_temp = 102000; pdev = device_get_parent(dev); rv = tegra210_coretemp_ofw_parse(sc); if (rv != 0) return (rv); ctx = device_get_sysctl_ctx(dev); oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(pdev)), OID_AUTO, "coretemp", CTLFLAG_RD, NULL, "Per-CPU thermal information"); /* * Add the MIBs to dev.cpu.N and dev.cpu.N.coretemp. */ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(pdev)), OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_TEMP, coretemp_get_val_sysctl, "IK", "Current temperature"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "delta", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_DELTA, coretemp_get_val_sysctl, "I", "Delta between TCC activation and current temperature"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "resolution", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_RESOLUTION, coretemp_get_val_sysctl, "I", "Resolution of CPU thermal sensor"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "tjmax", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, CORETEMP_TJMAX, coretemp_get_val_sysctl, "IK", "TCC activation temperature"); return (0); } static int tegra210_coretemp_detach(device_t dev) { return (0); } static device_method_t tegra210_coretemp_methods[] = { /* Device interface */ DEVMETHOD(device_identify, tegra210_coretemp_identify), DEVMETHOD(device_probe, tegra210_coretemp_probe), DEVMETHOD(device_attach, tegra210_coretemp_attach), DEVMETHOD(device_detach, tegra210_coretemp_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(tegra210_coretemp, tegra210_coretemp_driver, tegra210_coretemp_methods, sizeof(struct tegra210_coretemp_softc)); DRIVER_MODULE(tegra210_coretemp, cpu, tegra210_coretemp_driver, NULL, NULL); diff --git a/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c b/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c index e8b48c4a4947..99b54b224e89 100644 --- 
a/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c +++ b/sys/arm64/nvidia/tegra210/tegra210_cpufreq.c @@ -1,496 +1,496 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "cpufreq_if.h" /* CPU voltage table entry */ struct speedo_entry { uint64_t freq; /* Frequency point */ int c0; /* Coeeficient values for */ int c1; /* quadratic equation: */ int c2; /* c2 * speedo^2 + c1 * speedo + c0 */ }; struct cpu_volt_def { int min_uvolt; /* Min allowed CPU voltage */ int max_uvolt; /* Max allowed CPU voltage */ int step_uvolt; /* Step of CPU voltage */ int speedo_scale; /* Scaling factor for cvt */ int speedo_nitems; /* Size of speedo table */ struct speedo_entry *speedo_tbl; /* CPU voltage table */ }; struct cpu_speed_point { uint64_t freq; /* Frequecy */ int uvolt; /* Requested voltage */ }; static struct speedo_entry tegra210_speedo_tbl[] = { {204000000UL, 1007452, -23865, 370}, {306000000UL, 1052709, -24875, 370}, {408000000UL, 1099069, -25895, 370}, {510000000UL, 1146534, -26905, 370}, {612000000UL, 1195102, -27915, 370}, {714000000UL, 1244773, -28925, 370}, {816000000UL, 1295549, -29935, 370}, {918000000UL, 1347428, -30955, 370}, {1020000000UL, 1400411, -31965, 370}, {1122000000UL, 1454497, -32975, 370}, {1224000000UL, 1509687, -33985, 370}, {1326000000UL, 1565981, -35005, 370}, {1428000000UL, 1623379, -36015, 370}, {1530000000UL, 1681880, -37025, 370}, {1632000000UL, 1741485, -38035, 370}, {1734000000UL, 1802194, -39055, 370}, {1836000000UL, 1864006, -40065, 370}, {1912500000UL, 1910780, -40815, 370}, {2014500000UL, 1227000, 0, 0}, {2218500000UL, 1227000, 0, 0}, }; static struct cpu_volt_def tegra210_cpu_volt_def = { .min_uvolt = 900000, /* 0.9 V */ .max_uvolt = 1227000, /* 1.227 */ .step_uvolt = 10000, /* 10 mV */ .speedo_scale = 100, .speedo_nitems = nitems(tegra210_speedo_tbl), .speedo_tbl = tegra210_speedo_tbl, }; static uint64_t cpu_max_freq[] = { 1912500000UL, 1912500000UL, 2218500000UL, 1785000000UL, 1632000000UL, 1912500000UL, 2014500000UL, 1734000000UL, 1683000000UL, 
1555500000UL, 1504500000UL, }; static uint64_t cpu_freq_tbl[] = { 204000000UL, 306000000UL, 408000000UL, 510000000UL, 612000000UL, 714000000UL, 816000000UL, 918000000UL, 1020000000UL, 1122000000UL, 1224000000UL, 1326000000UL, 1428000000UL, 1530000000UL, 1632000000UL, 1734000000UL, 1836000000UL, 1912500000UL, 2014500000UL, 2218500000UL, }; struct tegra210_cpufreq_softc { device_t dev; phandle_t node; clk_t clk_cpu_g; clk_t clk_pll_x; clk_t clk_pll_p; clk_t clk_dfll; int process_id; int speedo_id; int speedo_value; uint64_t cpu_max_freq; struct cpu_volt_def *cpu_def; struct cpu_speed_point *speed_points; int nspeed_points; struct cpu_speed_point *act_speed_point; int latency; }; static int cpufreq_lowest_freq = 1; TUNABLE_INT("hw.tegra210.cpufreq.lowest_freq", &cpufreq_lowest_freq); #define DIV_ROUND_CLOSEST(val, div) (((val) + ((div) / 2)) / (div)) #define ROUND_UP(val, div) roundup(val, div) #define ROUND_DOWN(val, div) rounddown(val, div) /* * Compute requesetd voltage for given frequency and SoC process variations, * - compute base voltage from speedo value using speedo table * - round up voltage to next regulator step * - clamp it to regulator limits */ static int freq_to_voltage(struct tegra210_cpufreq_softc *sc, uint64_t freq) { int uv, scale, min_uvolt, max_uvolt, step_uvolt; struct speedo_entry *ent; int i; /* Get speedo entry with higher frequency */ ent = NULL; for (i = 0; i < sc->cpu_def->speedo_nitems; i++) { if (sc->cpu_def->speedo_tbl[i].freq >= freq) { ent = &sc->cpu_def->speedo_tbl[i]; break; } } if (ent == NULL) ent = &sc->cpu_def->speedo_tbl[sc->cpu_def->speedo_nitems - 1]; scale = sc->cpu_def->speedo_scale; /* uV = (c2 * speedo / scale + c1) * speedo / scale + c0) */ uv = DIV_ROUND_CLOSEST(ent->c2 * sc->speedo_value, scale); uv = DIV_ROUND_CLOSEST((uv + ent->c1) * sc->speedo_value, scale) + ent->c0; step_uvolt = sc->cpu_def->step_uvolt; /* Round up it to next regulator step */ uv = ROUND_UP(uv, step_uvolt); /* Clamp result */ min_uvolt = 
ROUND_UP(sc->cpu_def->min_uvolt, step_uvolt); max_uvolt = ROUND_DOWN(sc->cpu_def->max_uvolt, step_uvolt); if (uv < min_uvolt) uv = min_uvolt; if (uv > max_uvolt) uv = max_uvolt; return (uv); } static void build_speed_points(struct tegra210_cpufreq_softc *sc) { int i; sc->nspeed_points = nitems(cpu_freq_tbl); sc->speed_points = malloc(sizeof(struct cpu_speed_point) * sc->nspeed_points, M_DEVBUF, M_NOWAIT); for (i = 0; i < sc->nspeed_points; i++) { sc->speed_points[i].freq = cpu_freq_tbl[i]; sc->speed_points[i].uvolt = freq_to_voltage(sc, cpu_freq_tbl[i]); } } static struct cpu_speed_point * get_speed_point(struct tegra210_cpufreq_softc *sc, uint64_t freq) { int i; if (sc->speed_points[0].freq >= freq) return (sc->speed_points + 0); for (i = 0; i < sc->nspeed_points - 1; i++) { if (sc->speed_points[i + 1].freq > freq) return (sc->speed_points + i); } return (sc->speed_points + sc->nspeed_points - 1); } static int tegra210_cpufreq_settings(device_t dev, struct cf_setting *sets, int *count) { struct tegra210_cpufreq_softc *sc; int i, j; if (sets == NULL || count == NULL) return (EINVAL); sc = device_get_softc(dev); memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * (*count)); for (i = 0, j = sc->nspeed_points - 1; j >= 0; j--) { if (sc->cpu_max_freq < sc->speed_points[j].freq) continue; sets[i].freq = sc->speed_points[j].freq / 1000000; sets[i].volts = sc->speed_points[j].uvolt / 1000; sets[i].lat = sc->latency; sets[i].dev = dev; i++; } *count = i; return (0); } static int set_cpu_freq(struct tegra210_cpufreq_softc *sc, uint64_t freq) { struct cpu_speed_point *point; int rv; point = get_speed_point(sc, freq); /* Set PLLX frequency */ rv = clk_set_freq(sc->clk_pll_x, point->freq, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(sc->dev, "Can't set CPU clock frequency\n"); return (rv); } sc->act_speed_point = point; return (0); } static int tegra210_cpufreq_set(device_t dev, const struct cf_setting *cf) { struct tegra210_cpufreq_softc *sc; uint64_t freq; int rv; if (cf 
== NULL || cf->freq < 0) return (EINVAL); sc = device_get_softc(dev); freq = cf->freq; if (freq < cpufreq_lowest_freq) freq = cpufreq_lowest_freq; freq *= 1000000; if (freq >= sc->cpu_max_freq) freq = sc->cpu_max_freq; rv = set_cpu_freq(sc, freq); return (rv); } static int tegra210_cpufreq_get(device_t dev, struct cf_setting *cf) { struct tegra210_cpufreq_softc *sc; if (cf == NULL) return (EINVAL); sc = device_get_softc(dev); memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf)); cf->dev = NULL; cf->freq = sc->act_speed_point->freq / 1000000; cf->volts = sc->act_speed_point->uvolt / 1000; /* Transition latency in us. */ cf->lat = sc->latency; /* Driver providing this setting. */ cf->dev = dev; return (0); } static int tegra210_cpufreq_type(device_t dev, int *type) { if (type == NULL) return (EINVAL); *type = CPUFREQ_TYPE_ABSOLUTE; return (0); } static int get_fdt_resources(struct tegra210_cpufreq_softc *sc, phandle_t node) { int rv; device_t parent_dev; parent_dev = device_get_parent(sc->dev); rv = clk_get_by_ofw_name(parent_dev, 0, "cpu_g", &sc->clk_cpu_g); if (rv != 0) { device_printf(sc->dev, "Cannot get 'cpu_g' clock: %d\n", rv); return (ENXIO); } rv = clk_get_by_ofw_name(parent_dev, 0, "pll_x", &sc->clk_pll_x); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll_x' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(parent_dev, 0, "pll_p", &sc->clk_pll_p); if (rv != 0) { device_printf(parent_dev, "Cannot get 'pll_p' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(parent_dev, 0, "dfll", &sc->clk_dfll); /* XXX DPLL is not implemented yet */ #if 0 if (rv != 0) { device_printf(sc->dev, "Cannot get 'dfll' clock\n"); return (ENXIO); } #endif return (0); } static void tegra210_cpufreq_identify(driver_t *driver, device_t parent) { phandle_t root; root = OF_finddevice("/"); if (!ofw_bus_node_is_compatible(root, "nvidia,tegra210")) return; if (device_get_unit(parent) != 0) return; if (device_find_child(parent, "tegra210_cpufreq", -1) != NULL) return; if 
(BUS_ADD_CHILD(parent, 0, "tegra210_cpufreq", -1) == NULL) device_printf(parent, "add child failed\n"); } static int tegra210_cpufreq_probe(device_t dev) { device_set_desc(dev, "CPU Frequency Control"); return (0); } static int tegra210_cpufreq_attach(device_t dev) { struct tegra210_cpufreq_softc *sc; uint64_t freq; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(device_get_parent(dev)); sc->process_id = tegra_sku_info.cpu_process_id; sc->speedo_id = tegra_sku_info.cpu_speedo_id; sc->speedo_value = tegra_sku_info.cpu_speedo_value; sc->cpu_def = &tegra210_cpu_volt_def; rv = get_fdt_resources(sc, sc->node); if (rv != 0) { return (rv); } build_speed_points(sc); rv = clk_get_freq(sc->clk_cpu_g, &freq); if (rv != 0) { device_printf(dev, "Can't get CPU clock frequency\n"); return (rv); } if (sc->speedo_id < nitems(cpu_max_freq)) sc->cpu_max_freq = cpu_max_freq[sc->speedo_id]; else sc->cpu_max_freq = cpu_max_freq[0]; sc->act_speed_point = get_speed_point(sc, freq); /* Set safe startup CPU frequency. */ rv = set_cpu_freq(sc, 1632000000); if (rv != 0) { device_printf(dev, "Can't set initial CPU clock frequency\n"); return (rv); } /* This device is controlled by cpufreq(4). 
*/ cpufreq_register(dev); return (0); } static int tegra210_cpufreq_detach(device_t dev) { struct tegra210_cpufreq_softc *sc; sc = device_get_softc(dev); cpufreq_unregister(dev); if (sc->clk_cpu_g != NULL) clk_release(sc->clk_cpu_g); if (sc->clk_pll_x != NULL) clk_release(sc->clk_pll_x); if (sc->clk_pll_p != NULL) clk_release(sc->clk_pll_p); if (sc->clk_dfll != NULL) clk_release(sc->clk_dfll); return (0); } static device_method_t tegra210_cpufreq_methods[] = { /* Device interface */ DEVMETHOD(device_identify, tegra210_cpufreq_identify), DEVMETHOD(device_probe, tegra210_cpufreq_probe), DEVMETHOD(device_attach, tegra210_cpufreq_attach), DEVMETHOD(device_detach, tegra210_cpufreq_detach), /* cpufreq interface */ DEVMETHOD(cpufreq_drv_set, tegra210_cpufreq_set), DEVMETHOD(cpufreq_drv_get, tegra210_cpufreq_get), DEVMETHOD(cpufreq_drv_settings, tegra210_cpufreq_settings), DEVMETHOD(cpufreq_drv_type, tegra210_cpufreq_type), DEVMETHOD_END }; static DEFINE_CLASS_0(tegra210_cpufreq, tegra210_cpufreq_driver, tegra210_cpufreq_methods, sizeof(struct tegra210_cpufreq_softc)); DRIVER_MODULE(tegra210_cpufreq, cpu, tegra210_cpufreq_driver, NULL, NULL); diff --git a/sys/arm64/nvidia/tegra210/tegra210_pmc.c b/sys/arm64/nvidia/tegra210/tegra210_pmc.c index 0f56342d1ac4..a04ec212a8c7 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_pmc.c +++ b/sys/arm64/nvidia/tegra210/tegra210_pmc.c @@ -1,624 +1,624 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #define PMC_CNTRL 0x000 #define PMC_CNTRL_SHUTDOWN_OE (1 << 22) #define PMC_CNTRL_CPUPWRGOOD_SEL_MASK (0x3 << 20) #define PMC_CNTRL_CPUPWRGOOD_SEL_SHIFT 20 #define PMC_CNTRL_CPUPWRGOOD_EN (1 << 19) #define PMC_CNTRL_FUSE_OVERRIDE (1 << 18) #define PMC_CNTRL_INTR_POLARITY (1 << 17) #define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) #define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) #define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) #define PMC_CNTRL_AOINIT (1 << 13) #define PMC_CNTRL_PWRGATE_DIS (1 << 12) #define PMC_CNTRL_SYSCLK_OE (1 << 11) #define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) #define PMC_CNTRL_PWRREQ_OE (1 << 9) #define PMC_CNTRL_PWRREQ_POLARITY (1 << 8) #define PMC_CNTRL_BLINK_EN (1 << 7) #define PMC_CNTRL_GLITCHDET_DIS (1 << 6) #define PMC_CNTRL_LATCHWAKE_EN (1 << 5) #define PMC_CNTRL_MAIN_RST (1 << 4) #define PMC_CNTRL_KBC_RST (1 << 3) #define PMC_CNTRL_RTC_RST (1 << 2) #define PMC_CNTRL_RTC_CLK_DIS (1 << 1) #define PMC_CNTRL_KBC_CLK_DIS (1 << 0) #define PMC_DPD_SAMPLE 0x020 #define PMC_CLAMP_STATUS 0x02C #define PMC_CLAMP_STATUS_PARTID(x) (1 << ((x) & 0x1F)) #define 
PMC_PWRGATE_TOGGLE 0x030 #define PMC_PWRGATE_TOGGLE_START (1 << 8) #define PMC_PWRGATE_TOGGLE_PARTID(x) (((x) & 0x1F) << 0) #define PMC_REMOVE_CLAMPING_CMD 0x034 #define PMC_REMOVE_CLAMPING_CMD_PARTID(x) (1 << ((x) & 0x1F)) #define PMC_PWRGATE_STATUS 0x038 #define PMC_PWRGATE_STATUS_PARTID(x) (1 << ((x) & 0x1F)) #define PMC_SCRATCH0 0x050 #define PMC_SCRATCH0_MODE_RECOVERY (1 << 31) #define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30) #define PMC_SCRATCH0_MODE_RCM (1 << 1) #define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \ PMC_SCRATCH0_MODE_BOOTLOADER | \ PMC_SCRATCH0_MODE_RCM) #define PMC_CPUPWRGOOD_TIMER 0x0c8 #define PMC_CPUPWROFF_TIMER 0x0cc #define PMC_SCRATCH41 0x140 #define PMC_SENSOR_CTRL 0x1b0 #define PMC_SENSOR_CTRL_BLOCK_SCRATCH_WRITE (1 << 2) #define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1) #define PMC_SENSOR_CTRL_ENABLE_PG (1 << 0) #define PMC_IO_DPD_REQ 0x1b8 #define PMC_IO_DPD_REQ_CODE_IDLE (0 << 30) #define PMC_IO_DPD_REQ_CODE_OFF (1 << 30) #define PMC_IO_DPD_REQ_CODE_ON (2 << 30) #define PMC_IO_DPD_REQ_CODE_MASK (3 << 30) #define PMC_IO_DPD_STATUS 0x1bc #define PMC_IO_DPD_STATUS_HDMI (1 << 28) #define PMC_IO_DPD2_REQ 0x1c0 #define PMC_IO_DPD2_STATUS 0x1c4 #define PMC_IO_DPD2_STATUS_HV (1 << 6) #define PMC_SEL_DPD_TIM 0x1c8 #define PMC_SCRATCH54 0x258 #define PMC_SCRATCH54_DATA_SHIFT 8 #define PMC_SCRATCH54_ADDR_SHIFT 0 #define PMC_SCRATCH55 0x25c #define PMC_SCRATCH55_RST_ENABLE (1 << 31) #define PMC_SCRATCH55_CNTRL_TYPE (1 << 30) #define PMC_SCRATCH55_CNTRL_ID_SHIFT 27 #define PMC_SCRATCH55_CNTRL_ID_MASK 0x07 #define PMC_SCRATCH55_PINMUX_SHIFT 24 #define PMC_SCRATCH55_PINMUX_MASK 0x07 #define PMC_SCRATCH55_CHECKSUM_SHIFT 16 #define PMC_SCRATCH55_CHECKSUM_MASK 0xFF #define PMC_SCRATCH55_16BITOP (1 << 15) #define PMC_SCRATCH55_I2CSLV1_SHIFT 0 #define PMC_SCRATCH55_I2CSLV1_MASK 0x7F #define PMC_GPU_RG_CNTRL 0x2d4 /* Secure access */ #define PMC_SMC 0xc2fffe00 #define PMC_SMC_READ 0xaa #define PMC_SMC_WRITE 0xbb #define PMC_LOCK(_sc) 
mtx_lock(&(_sc)->mtx) #define PMC_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define PMC_LOCK_INIT(_sc) mtx_init(&(_sc)->mtx, \ device_get_nameunit(_sc->dev), "tegra210_pmc", MTX_DEF) #define PMC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx); #define PMC_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED); #define PMC_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_NOTOWNED); struct tegra210_pmc_softc { device_t dev; struct resource *mem_res; clk_t clk; struct mtx mtx; bool secure_access; uint32_t rate; enum tegra_suspend_mode suspend_mode; uint32_t cpu_good_time; uint32_t cpu_off_time; uint32_t core_osc_time; uint32_t core_pmu_time; uint32_t core_off_time; int corereq_high; int sysclkreq_high; int combined_req; int cpu_pwr_good_en; uint32_t lp0_vec_phys; uint32_t lp0_vec_size; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra210-pmc", 1}, {NULL, 0}, }; static struct tegra210_pmc_softc *pmc_sc; static inline struct tegra210_pmc_softc * tegra210_pmc_get_sc(void) { if (pmc_sc == NULL) panic("To early call to Tegra PMC driver.\n"); return (pmc_sc); } static void WR4(struct tegra210_pmc_softc *sc, bus_size_t r, uint32_t v) { struct arm_smccc_res res; if (sc->secure_access) { arm_smccc_smc(PMC_SMC, PMC_SMC_WRITE, r, v, 0, 0, 0, 0, &res); if (res.a0 != 0) device_printf(sc->dev," PMC SMC write failed: %lu\n", res.a0); } bus_write_4(sc->mem_res, r, v); } static uint32_t RD4(struct tegra210_pmc_softc *sc, bus_size_t r) { struct arm_smccc_res res; if (sc->secure_access) { arm_smccc_smc(PMC_SMC, PMC_SMC_READ, r, 0, 0, 0, 0, 0, &res); if (res.a0 != 0) device_printf(sc->dev," PMC SMC write failed: %lu\n", res.a0); return((uint32_t)res.a1); } return(bus_read_4(sc->mem_res, r)); } static int tegra210_pmc_set_powergate(struct tegra210_pmc_softc *sc, enum tegra_powergate_id id, int ena) { uint32_t reg; int i; PMC_LOCK(sc); reg = RD4(sc, PMC_PWRGATE_STATUS) & PMC_PWRGATE_STATUS_PARTID(id); if (((reg != 0) && ena) || ((reg == 0) && !ena)) { PMC_UNLOCK(sc); return (0); } for (i 
= 100; i > 0; i--) { reg = RD4(sc, PMC_PWRGATE_TOGGLE); if ((reg & PMC_PWRGATE_TOGGLE_START) == 0) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting for TOGGLE_START\n"); WR4(sc, PMC_PWRGATE_TOGGLE, PMC_PWRGATE_TOGGLE_START | PMC_PWRGATE_TOGGLE_PARTID(id)); for (i = 100; i > 0; i--) { reg = RD4(sc, PMC_PWRGATE_TOGGLE); if ((reg & PMC_PWRGATE_TOGGLE_START) == 0) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting for TOGGLE_START\n"); PMC_UNLOCK(sc); return (0); } int tegra_powergate_remove_clamping(enum tegra_powergate_id id) { struct tegra210_pmc_softc *sc; uint32_t reg; enum tegra_powergate_id swid; int i; sc = tegra210_pmc_get_sc(); if (id == TEGRA_POWERGATE_3D) { WR4(sc, PMC_GPU_RG_CNTRL, 0); return (0); } reg = RD4(sc, PMC_PWRGATE_STATUS); if ((reg & PMC_PWRGATE_STATUS_PARTID(id)) == 0) panic("Attempt to remove clamping for unpowered partition.\n"); if (id == TEGRA_POWERGATE_PCX) swid = TEGRA_POWERGATE_VDE; else if (id == TEGRA_POWERGATE_VDE) swid = TEGRA_POWERGATE_PCX; else swid = id; WR4(sc, PMC_REMOVE_CLAMPING_CMD, PMC_REMOVE_CLAMPING_CMD_PARTID(swid)); for (i = 100; i > 0; i--) { reg = RD4(sc, PMC_REMOVE_CLAMPING_CMD); if ((reg & PMC_REMOVE_CLAMPING_CMD_PARTID(swid)) == 0) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when remove clamping\n"); reg = RD4(sc, PMC_CLAMP_STATUS); if ((reg & PMC_CLAMP_STATUS_PARTID(id)) != 0) panic("Cannot remove clamping\n"); return (0); } int tegra_powergate_is_powered(enum tegra_powergate_id id) { struct tegra210_pmc_softc *sc; uint32_t reg; sc = tegra210_pmc_get_sc(); reg = RD4(sc, PMC_PWRGATE_STATUS); return ((reg & PMC_PWRGATE_STATUS_PARTID(id)) ? 
1 : 0); } int tegra_powergate_power_on(enum tegra_powergate_id id) { struct tegra210_pmc_softc *sc; int rv, i; sc = tegra210_pmc_get_sc(); rv = tegra210_pmc_set_powergate(sc, id, 1); if (rv != 0) { device_printf(sc->dev, "Cannot set powergate: %d\n", id); return (rv); } for (i = 100; i > 0; i--) { if (tegra_powergate_is_powered(id)) break; DELAY(1); } if (i <= 0) { device_printf(sc->dev, "Timeout when waiting on power up\n"); return(ETIMEDOUT); } return (rv); } int tegra_powergate_power_off(enum tegra_powergate_id id) { struct tegra210_pmc_softc *sc; int rv, i; sc = tegra210_pmc_get_sc(); rv = tegra210_pmc_set_powergate(sc, id, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set powergate: %d\n", id); return (rv); } for (i = 100; i > 0; i--) { if (!tegra_powergate_is_powered(id)) break; DELAY(1); } if (i <= 0) device_printf(sc->dev, "Timeout when waiting on power off\n"); return (rv); } int tegra_powergate_sequence_power_up(enum tegra_powergate_id id, clk_t clk, hwreset_t rst) { struct tegra210_pmc_softc *sc; int rv; sc = tegra210_pmc_get_sc(); rv = hwreset_assert(rst); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); return (rv); } rv = clk_stop(clk); if (rv != 0) { device_printf(sc->dev, "Cannot stop clock\n"); goto clk_fail; } rv = tegra_powergate_power_on(id); if (rv != 0) { device_printf(sc->dev, "Cannot power on powergate\n"); goto clk_fail; } rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable clock\n"); goto clk_fail; } DELAY(20); rv = tegra_powergate_remove_clamping(id); if (rv != 0) { device_printf(sc->dev, "Cannot remove clamping\n"); goto fail; } rv = hwreset_deassert(rst); if (rv != 0) { device_printf(sc->dev, "Cannot unreset reset\n"); goto fail; } return 0; fail: clk_disable(clk); clk_fail: hwreset_assert(rst); tegra_powergate_power_off(id); return (rv); } static int tegra210_pmc_parse_fdt(struct tegra210_pmc_softc *sc, phandle_t node) { int rv; uint32_t tmp; uint32_t tmparr[2]; rv = OF_getencprop(node, 
"nvidia,suspend-mode", &tmp, sizeof(tmp)); if (rv > 0) { switch (tmp) { case 0: sc->suspend_mode = TEGRA_SUSPEND_LP0; break; case 1: sc->suspend_mode = TEGRA_SUSPEND_LP1; break; case 2: sc->suspend_mode = TEGRA_SUSPEND_LP2; break; default: sc->suspend_mode = TEGRA_SUSPEND_NONE; break; } } rv = OF_getencprop(node, "nvidia,cpu-pwr-good-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->cpu_good_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,cpu-pwr-off-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->cpu_off_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,core-pwr-good-time", tmparr, sizeof(tmparr)); if (rv == sizeof(tmparr)) { sc->core_osc_time = tmparr[0]; sc->core_pmu_time = tmparr[1]; sc->suspend_mode = TEGRA_SUSPEND_NONE; } rv = OF_getencprop(node, "nvidia,core-pwr-off-time", &tmp, sizeof(tmp)); if (rv > 0) { sc->core_off_time = tmp; sc->suspend_mode = TEGRA_SUSPEND_NONE; } sc->corereq_high = OF_hasprop(node, "nvidia,core-power-req-active-high"); sc->sysclkreq_high = OF_hasprop(node, "nvidia,sys-clock-req-active-high"); sc->combined_req = OF_hasprop(node, "nvidia,combined-power-req"); sc->cpu_pwr_good_en = OF_hasprop(node, "nvidia,cpu-pwr-good-en"); rv = OF_getencprop(node, "nvidia,lp0-vec", tmparr, sizeof(tmparr)); if (rv == sizeof(tmparr)) { sc->lp0_vec_phys = tmparr[0]; sc->core_pmu_time = tmparr[1]; sc->lp0_vec_size = TEGRA_SUSPEND_NONE; if (sc->suspend_mode == TEGRA_SUSPEND_LP0) sc->suspend_mode = TEGRA_SUSPEND_LP1; } return 0; } static void tegra210_pmc_check_secure(struct tegra210_pmc_softc *sc) { uint32_t orig; sc->secure_access = false; /* * If PMC is coverd by secure trust zone, all reads returns 0, * Use scratch0 register acvcess test */ orig = RD4(sc, PMC_SCRATCH0); WR4(sc, PMC_SCRATCH0, 0xDEADBEEF); if (RD4(sc, PMC_SCRATCH0) == 0) { sc->secure_access = true; return; } WR4(sc, PMC_SCRATCH0, 0xBADC0DE); if (RD4(sc, PMC_SCRATCH0) == 0) { sc->secure_access = true; return; } WR4(sc, 
PMC_SCRATCH0, orig); } static int tegra210_pmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Tegra PMC"); return (BUS_PROBE_DEFAULT); } static int tegra210_pmc_detach(device_t dev) { /* This device is always present. */ return (EBUSY); } static int tegra210_pmc_attach(device_t dev) { struct tegra210_pmc_softc *sc; int rid, rv; uint32_t reg; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); PMC_LOCK_INIT(sc); rv = tegra210_pmc_parse_fdt(sc, node); if (rv != 0) { device_printf(sc->dev, "Cannot parse FDT data\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "pclk", &sc->clk); if (rv != 0) { device_printf(sc->dev, "Cannot get \"pclk\" clock\n"); return (ENXIO); } rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); return (ENXIO); } tegra210_pmc_check_secure(sc); /* Enable CPU power request. */ reg = RD4(sc, PMC_CNTRL); reg |= PMC_CNTRL_CPU_PWRREQ_OE; WR4(sc, PMC_CNTRL, reg); /* Set sysclk output polarity */ reg = RD4(sc, PMC_CNTRL); if (sc->sysclkreq_high) reg &= ~PMC_CNTRL_SYSCLK_POLARITY; else reg |= PMC_CNTRL_SYSCLK_POLARITY; WR4(sc, PMC_CNTRL, reg); /* Enable sysclk request. */ reg = RD4(sc, PMC_CNTRL); reg |= PMC_CNTRL_SYSCLK_OE; WR4(sc, PMC_CNTRL, reg); /* * Remove HDMI from deep power down mode. 
* XXX mote this to HDMI driver */ reg = RD4(sc, PMC_IO_DPD_STATUS); reg &= ~ PMC_IO_DPD_STATUS_HDMI; WR4(sc, PMC_IO_DPD_STATUS, reg); reg = RD4(sc, PMC_IO_DPD2_STATUS); reg &= ~ PMC_IO_DPD2_STATUS_HV; WR4(sc, PMC_IO_DPD2_STATUS, reg); if (pmc_sc != NULL) panic("tegra210_pmc: double driver attach"); pmc_sc = sc; return (0); } static device_method_t tegra210_pmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra210_pmc_probe), DEVMETHOD(device_attach, tegra210_pmc_attach), DEVMETHOD(device_detach, tegra210_pmc_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(pmc, tegra210_pmc_driver, tegra210_pmc_methods, sizeof(struct tegra210_pmc_softc)); EARLY_DRIVER_MODULE(tegra210_pmc, simplebus, tegra210_pmc_driver, NULL, NULL, 70); diff --git a/sys/arm64/nvidia/tegra210/tegra210_xusbpadctl.c b/sys/arm64/nvidia/tegra210/tegra210_xusbpadctl.c index c3bf2d54668b..bfcd8894034a 100644 --- a/sys/arm64/nvidia/tegra210/tegra210_xusbpadctl.c +++ b/sys/arm64/nvidia/tegra210/tegra210_xusbpadctl.c @@ -1,1957 +1,1957 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include "phynode_if.h" /* FUSE calibration data. */ #define FUSE_SKU_CALIB_0 0x0F0 #define FUSE_SKU_CALIB_0_HS_CURR_LEVEL_123(x, i) (((x) >> (11 + ((i) - 1) * 6)) & 0x3F); #define FUSE_SKU_CALIB_0_HS_TERM_RANGE_ADJ(x) (((x) >> 7) & 0x0F); #define FUSE_SKU_CALIB_0_HS_CURR_LEVEL_0(x) (((x) >> 0) & 0x3F); #define FUSE_USB_CALIB_EXT_0 0x250 #define FUSE_USB_CALIB_EXT_0_RPD_CTRL(x) (((x) >> 0) & 0x1F); /* Registers. 
*/ #define XUSB_PADCTL_USB2_PAD_MUX 0x004 #define XUSB_PADCTL_USB2_PORT_CAP 0x008 #define USB2_PORT_CAP_PORT_REVERSE_ID(p) (1 << (3 + (p) * 4)) #define USB2_PORT_CAP_PORT_INTERNAL(p) (1 << (2 + (p) * 4)) #define USB2_PORT_CAP_PORT_CAP(p, x) (((x) & 3) << ((p) * 4)) #define USB2_PORT_CAP_PORT_CAP_OTG 0x3 #define USB2_PORT_CAP_PORT_CAP_DEVICE 0x2 #define USB2_PORT_CAP_PORT_CAP_HOST 0x1 #define USB2_PORT_CAP_PORT_CAP_DISABLED 0x0 #define XUSB_PADCTL_SS_PORT_MAP 0x014 #define SS_PORT_MAP_PORT_INTERNAL(p) (1 << (3 + (p) * 4)) #define SS_PORT_MAP_PORT_MAP(p, x) (((x) & 7) << ((p) * 4)) #define XUSB_PADCTL_ELPG_PROGRAM1 0x024 #define ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31) #define ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 30) #define ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN (1 << 29) #define ELPG_PROGRAM1_SSP_ELPG_VCORE_DOWN(x) (1 << (2 + (x) * 3)) #define ELPG_PROGRAM1_SSP_ELPG_CLAMP_EN_EARLY(x) (1 << (1 + (x) * 3)) #define ELPG_PROGRAM1_SSP_ELPG_CLAMP_EN(x) (1 << (0 + (x) * 3)) #define XUSB_PADCTL_USB3_PAD_MUX 0x028 #define USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x))) #define USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x))) #define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1(x) (0x084 + (x) * 0x40) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBON_RPU_OVRD_VAL (1 << 23) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBON_RPU_OVRD ( 1 << 22) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBON_RPD_OVRD_VAL (1 << 21) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBON_RPD_OVRD (1 << 20) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBOP_RPU_OVRD_VAL (1 << 19) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBOP_RPU_OVRD (1 << 18) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBOP_RPD_OVRD_VAL (1 << 17) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_USBOP_RPD_OVRD (1 << 16) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_DYN_DLY(x) (((x) & 0x3) << 9) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV(x) (((x) & 0x3) << 7) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_DIV_DET_EN (1 
<< 4) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VOP_DIV2P7_DET (1 << 3) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VOP_DIV2P0_DET (1 << 2) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VON_DIV2P7_DET (1 << 1) #define USB2_BATTERY_CHRG_OTGPAD_CTL1_VON_DIV2P0_DET (1 << 0) #define XUSB_PADCTL_USB2_OTG_PAD_CTL0(x) (0x088 + (x) * 0x40) #define USB2_OTG_PAD_CTL0_PD_ZI (1 << 29) #define USB2_OTG_PAD_CTL0_PD2_OVRD_EN (1 << 28) #define USB2_OTG_PAD_CTL0_PD2 (1 << 27) #define USB2_OTG_PAD_CTL0_PD (1 << 26) #define USB2_OTG_PAD_CTL0_TERM_EN (1 << 25) #define USB2_OTG_PAD_CTL0_LS_FSLEW(x) (((x) & 0x0F) << 21) #define USB2_OTG_PAD_CTL0_LS_RSLEW(x) (((x) & 0x0F) << 17) #define USB2_OTG_PAD_CTL0_FS_FSLEW(x) (((x) & 0x0F) << 13) #define USB2_OTG_PAD_CTL0_FS_RSLEW(x) (((x) & 0x0F) << 9) #define USB2_OTG_PAD_CTL0_HS_SLEW(x) (((x) & 0x3F) << 6) #define USB2_OTG_PAD_CTL0_HS_CURR_LEVEL(x) (((x) & 0x3F) << 0) #define XUSB_PADCTL_USB2_OTG_PAD_CTL1(x) (0x08C + (x) * 0x40) #define USB2_OTG_PAD_CTL1_RPD_CTRL(x) (((x) & 0x1F) << 26) #define USB2_OTG_PAD_CTL1_RPU_STATUS_HIGH (1 << 25) #define USB2_OTG_PAD_CTL1_RPU_SWITCH_LOW (1 << 24) #define USB2_OTG_PAD_CTL1_RPU_SWITCH_OVRD (1 << 23) #define USB2_OTG_PAD_CTL1_HS_LOOPBACK_OVRD_VAL (1 << 22) #define USB2_OTG_PAD_CTL1_HS_LOOPBACK_OVRD_EN (1 << 21) #define USB2_OTG_PAD_CTL1_PTERM_RANGE_ADJ(x) (((x) & 0x0F) << 17) #define USB2_OTG_PAD_CTL1_PD_DISC_OVRD_VAL (1 << 16) #define USB2_OTG_PAD_CTL1_PD_CHRP_OVRD_VAL (1 << 15) #define USB2_OTG_PAD_CTL1_RPU_RANGE_ADJ(x) (((x) & 0x03) << 13) #define USB2_OTG_PAD_CTL1_HS_COUP_EN(x) (((x) & 0x03) << 11) #define USB2_OTG_PAD_CTL1_SPARE(x) (((x) & 0x0F) << 7) #define USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ(x) (((x) & 0x0F) << 3) #define USB2_OTG_PAD_CTL1_PD_DR (1 << 2) #define USB2_OTG_PAD_CTL1_PD_DISC_OVRD (1 << 1) #define USB2_OTG_PAD_CTL1_PD_CHRP_OVRD (1 << 0) #define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0(x) (0x0C0 + (x) * 0x40) #define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x0284 #define USB2_BIAS_PAD_CTL0_TRK_PWR_ENA (1 << 29) 
#define USB2_BIAS_PAD_CTL0_SPARE(x) (((x) & 0xF) << 25) #define USB2_BIAS_PAD_CTL0_CHG_DIV(x) (((x) & 0xF) << 21) #define USB2_BIAS_PAD_CTL0_TEMP_COEF(x) (((x) & 0x7) << 18) #define USB2_BIAS_PAD_CTL0_VREF_CTRL(x) (((x) & 0x7) << 15) #define USB2_BIAS_PAD_CTL0_ADJRPU(x) (((x) & 0x7) << 12) #define USB2_BIAS_PAD_CTL0_PD (1 << 11) #define USB2_BIAS_PAD_CTL0_TERM_OFFSETL(x) (((x) & 0x7) << 8) #define USB2_BIAS_PAD_CTL0_HS_CHIRP_LEVEL(x) (((x) & 0x3) << 6) #define USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL(x) (((x) & 0x7) << 3) #define USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL(x) (((x) & 0x7) << 0) #define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x0288 #define USB2_BIAS_PAD_CTL1_FORCE_TRK_CLK_EN (1 << 30) #define USB2_BIAS_PAD_CTL1_TRK_SW_OVRD (1 << 29) #define USB2_BIAS_PAD_CTL1_TRK_DONE (1 << 28) #define USB2_BIAS_PAD_CTL1_TRK_START (1 << 27) #define USB2_BIAS_PAD_CTL1_PD_TRK (1 << 26) #define USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER(x) (((x) & 0x7F) << 19) #define USB2_BIAS_PAD_CTL1_TRK_START_TIMER(x) (((x) & 0x7F) << 12) #define USB2_BIAS_PAD_CTL1_PCTRL(x) (((x) & 0x3F) << 6) #define USB2_BIAS_PAD_CTL1_TCTRL(x) (((x) & 0x3F) << 0) #define XUSB_PADCTL_HSIC_PAD_CTL0(x) (0x300 + (x) * 0x20) #define HSIC_PAD_CTL0_RPU_STROBE (1 << 18) #define HSIC_PAD_CTL0_RPU_DATA1 (1 << 17) #define HSIC_PAD_CTL0_RPU_DATA0 (1 << 16) #define HSIC_PAD_CTL0_RPD_STROBE (1 << 15) #define HSIC_PAD_CTL0_RPD_DATA1 (1 << 14) #define HSIC_PAD_CTL0_RPD_DATA0 (1 << 13) #define HSIC_PAD_CTL0_LPBK_STROBE (1 << 12) #define HSIC_PAD_CTL0_LPBK_DATA1 (1 << 11) #define HSIC_PAD_CTL0_LPBK_DATA0 (1 << 10) #define HSIC_PAD_CTL0_PD_ZI_STROBE (1 << 9) #define HSIC_PAD_CTL0_PD_ZI_DATA1 (1 << 8) #define HSIC_PAD_CTL0_PD_ZI_DATA0 (1 << 7) #define HSIC_PAD_CTL0_PD_RX_STROBE (1 << 6) #define HSIC_PAD_CTL0_PD_RX_DATA1 (1 << 5) #define HSIC_PAD_CTL0_PD_RX_DATA0 (1 << 4) #define HSIC_PAD_CTL0_PD_TX_STROBE (1 << 3) #define HSIC_PAD_CTL0_PD_TX_DATA1 (1 << 2) #define HSIC_PAD_CTL0_PD_TX_DATA0 (1 << 1) #define HSIC_PAD_CTL0_IDDQ (1 << 0) 
#define XUSB_PADCTL_HSIC_PAD_CTL1(x) (0x304 + (x) * 0x20) #define HSIC_PAD_CTL1_RTERM(x) (((x) & 0xF) << 12) #define HSIC_PAD_CTL1_HSIC_OPT(x) (((x) & 0xF) << 8) #define HSIC_PAD_CTL1_TX_SLEW(x) (((x) & 0xF) << 4) #define HSIC_PAD_CTL1_TX_RTUNEP(x) (((x) & 0xF) << 0) #define XUSB_PADCTL_HSIC_PAD_CTL2(x) (0x308 + (x) * 0x20) #define HSIC_PAD_CTL2_RX_STROBE_TRIM(x) (((x) & 0xF) << 8) #define HSIC_PAD_CTL2_RX_DATA1_TRIM(x) (((x) & 0xF) << 4) #define HSIC_PAD_CTL2_RX_DATA0_TRIM(x) (((x) & 0xF) << 0) #define XUSB_PADCTL_HSIC_PAD_TRK_CTL 0x340 #define HSIC_PAD_TRK_CTL_AUTO_RTERM_EN (1 << 24) #define HSIC_PAD_TRK_CTL_FORCE_TRK_CLK_EN (1 << 23) #define HSIC_PAD_TRK_CTL_TRK_SW_OVRD (1 << 22) #define HSIC_PAD_TRK_CTL_TRK_DONE (1 << 21) #define HSIC_PAD_TRK_CTL_TRK_START (1 << 20) #define HSIC_PAD_TRK_CTL_PD_TRK (1 << 19) #define HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER(x) (((x) & 0x3F) << 12) #define HSIC_PAD_TRK_CTL_TRK_START_TIMER(x) (((x) & 0x7F) << 5) #define HSIC_PAD_TRK_CTL_RTERM_OUT(x) (((x) & 0x1F) << 0) #define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x344 #define XUSB_PADCTL_UPHY_PLL_P0_CTL1 0x360 #define UPHY_PLL_P0_CTL1_PLL0_FREQ_PSDIV(x) (((x) & 0x03) << 28) #define UPHY_PLL_P0_CTL1_PLL0_FREQ_NDIV(x) (((x) & 0xFF) << 20) #define UPHY_PLL_P0_CTL1_PLL0_FREQ_MDIV(x) (((x) & 0x03) << 16) #define UPHY_PLL_P0_CTL1_PLL0_LOCKDET_STATUS (1 << 15) #define UPHY_PLL_P0_CTL1_PLL0_MODE_GET(x) (((x) >> 8) & 0x03) #define UPHY_PLL_P0_CTL1_PLL0_BYPASS_EN (1 << 7) #define UPHY_PLL_P0_CTL1_PLL0_FREERUN_EN (1 << 6) #define UPHY_PLL_P0_CTL1_PLL0_PWR_OVRD (1 << 4) #define UPHY_PLL_P0_CTL1_PLL0_ENABLE (1 << 3) #define UPHY_PLL_P0_CTL1_PLL0_SLEEP(x) (((x) & 0x03) << 1) #define UPHY_PLL_P0_CTL1_PLL0_IDDQ (1 << 0) #define XUSB_PADCTL_UPHY_PLL_P0_CTL2 0x364 #define UPHY_PLL_P0_CTL2_PLL0_CAL_CTRL(x) (((x) & 0xFFFFFF) << 4) #define UPHY_PLL_P0_CTL2_PLL0_CAL_RESET (1 << 3) #define UPHY_PLL_P0_CTL2_PLL0_CAL_OVRD (1 << 2) #define UPHY_PLL_P0_CTL2_PLL0_CAL_DONE (1 << 1) #define 
UPHY_PLL_P0_CTL2_PLL0_CAL_EN (1 << 0) #define XUSB_PADCTL_UPHY_PLL_P0_CTL4 0x36c #define UPHY_PLL_P0_CTL4_PLL0_TCLKOUT_EN (1 << 28) #define UPHY_PLL_P0_CTL4_PLL0_CLKDIST_CTRL(x) (((x) & 0xF) << 20) #define UPHY_PLL_P0_CTL4_PLL0_XDIGCLK_EN (1 << 19) #define UPHY_PLL_P0_CTL4_PLL0_XDIGCLK_SEL(x) (((x) & 0x7) << 16) #define UPHY_PLL_P0_CTL4_PLL0_TXCLKREF_EN (1 << 15) #define UPHY_PLL_P0_CTL4_PLL0_TXCLKREF_SEL(x) (((x) & 0x3) << 12) #define UPHY_PLL_P0_CTL4_PLL0_FBCLKBUF_EN (1 << 9) #define UPHY_PLL_P0_CTL4_PLL0_REFCLKBUF_EN (1 << 8) #define UPHY_PLL_P0_CTL4_PLL0_REFCLK_SEL(x) (((x) & 0xF) << 4) #define UPHY_PLL_P0_CTL4_PLL0_REFCLK_TERM100 (1 << 0) #define XUSB_PADCTL_UPHY_PLL_P0_CTL5 0x370 #define UPHY_PLL_P0_CTL5_PLL0_DCO_CTRL(x) (((x) & 0xFF) << 16) #define UPHY_PLL_P0_CTL5_PLL0_LPF_CTRL(x) (((x) & 0xFF) << 8) #define UPHY_PLL_P0_CTL5_PLL0_CP_CTRL(x) (((x) & 0x0F) << 4) #define UPHY_PLL_P0_CTL5_PLL0_PFD_CTRL(x) (((x) & 0x03) << 0) #define XUSB_PADCTL_UPHY_PLL_P0_CTL8 0x37c #define UPHY_PLL_P0_CTL8_PLL0_RCAL_DONE (1U << 31) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_VAL(x) (((x) & 0x1F) << 24) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_BYP_EN (1 << 23) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_BYP_CODE(x) (((x) & 0x1F) << 16) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_OVRD (1 << 15) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_CLK_EN (1 << 13) #define UPHY_PLL_P0_CTL8_PLL0_RCAL_EN (1 << 12) #define UPHY_PLL_P0_CTL8_PLL0_BGAP_CTRL(x) (((x) & 0xFFF) << 0) #define XUSB_PADCTL_UPHY_MISC_PAD_P_CTL1(x) (0x460 + (x) * 0x40) #define XUSB_PADCTL_UPHY_PLL_S0_CTL1 0x860 #define UPHY_PLL_S0_CTL1_PLL0_FREQ_PSDIV(x) (((x) & 0x03) << 28) #define UPHY_PLL_S0_CTL1_PLL0_FREQ_NDIV(x) (((x) & 0xFF) << 20) #define UPHY_PLL_S0_CTL1_PLL0_FREQ_MDIV(x) (((x) & 0x03) << 16) #define UPHY_PLL_S0_CTL1_PLL0_LOCKDET_STATUS (1 << 15) #define UPHY_PLL_S0_CTL1_PLL0_MODE_GET(x) (((x) >> 8) & 0x03) #define UPHY_PLL_S0_CTL1_PLL0_BYPASS_EN (1 << 7) #define UPHY_PLL_S0_CTL1_PLL0_FREERUN_EN (1 << 6) #define UPHY_PLL_S0_CTL1_PLL0_PWR_OVRD (1 << 4) 
#define UPHY_PLL_S0_CTL1_PLL0_ENABLE (1 << 3) #define UPHY_PLL_S0_CTL1_PLL0_SLEEP(x) (((x) & 0x03) << 1) #define UPHY_PLL_S0_CTL1_PLL0_IDDQ (1 << 0) #define XUSB_PADCTL_UPHY_PLL_S0_CTL2 0x864 #define UPHY_PLL_S0_CTL2_PLL0_CAL_CTRL(x) (((x) & 0xFFFFFF) << 4) #define UPHY_PLL_S0_CTL2_PLL0_CAL_RESET (1 << 3) #define UPHY_PLL_S0_CTL2_PLL0_CAL_OVRD (1 << 2) #define UPHY_PLL_S0_CTL2_PLL0_CAL_DONE (1 << 1) #define UPHY_PLL_S0_CTL2_PLL0_CAL_EN (1 << 0) #define XUSB_PADCTL_UPHY_PLL_S0_CTL4 0x86c #define UPHY_PLL_S0_CTL4_PLL0_TCLKOUT_EN (1 << 28) #define UPHY_PLL_S0_CTL4_PLL0_CLKDIST_CTRL(x) (((x) & 0xF) << 20) #define UPHY_PLL_S0_CTL4_PLL0_XDIGCLK_EN (1 << 19) #define UPHY_PLL_S0_CTL4_PLL0_XDIGCLK_SEL(x) (((x) & 0x7) << 16) #define UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_EN (1 << 15) #define UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_SEL(x) (((x) & 0x3) << 12) #define UPHY_PLL_S0_CTL4_PLL0_FBCLKBUF_EN (1 << 9) #define UPHY_PLL_S0_CTL4_PLL0_REFCLKBUF_EN (1 << 8) #define UPHY_PLL_S0_CTL4_PLL0_REFCLK_SEL(x) (((x) & 0xF) << 4) #define UPHY_PLL_S0_CTL4_PLL0_REFCLK_TERM100 (1 << 0) #define XUSB_PADCTL_UPHY_PLL_S0_CTL5 0x870 #define UPHY_PLL_S0_CTL5_PLL0_DCO_CTRL(x) (((x) & 0xFF) << 16) #define UPHY_PLL_S0_CTL5_PLL0_LPF_CTRL(x) (((x) & 0xFF) << 8) #define UPHY_PLL_S0_CTL5_PLL0_CP_CTRL(x) (((x) & 0x0F) << 4) #define UPHY_PLL_S0_CTL5_PLL0_PFD_CTRL(x) (((x) & 0x03) << 0) #define XUSB_PADCTL_UPHY_PLL_S0_CTL8 0x87c #define UPHY_PLL_S0_CTL8_PLL0_RCAL_DONE (1U << 31) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_VAL(x) (((x) & 0x1F) << 24) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_BYP_EN (1 << 23) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_BYP_CODE(x) (((x) & 0x1F) << 16) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_OVRD (1 << 15) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_CLK_EN (1 << 13) #define UPHY_PLL_S0_CTL8_PLL0_RCAL_EN (1 << 12) #define UPHY_PLL_S0_CTL8_PLL0_BGAP_CTRL(x) (((x) & 0xFFF) << 0) #define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1 0x960 #define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1(x) (0xa60 + (x) * 0x40) #define 
UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL(x) (((x) & 0x3) << 16) #define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2(x) (0xa64 + (x) * 0x40) #define UPHY_USB3_PAD_ECTL2_RX_IQ_CTRL(x) (((x) & 0x000F) << 16) #define UPHY_USB3_PAD_ECTL2_RX_CTLE(x) (((x) & 0xFFFF) << 0) #define XUSB_PADCTL_UPHY_USB3_PAD_ECTL3(x) (0xa68 + (x) * 0x40) #define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4(x) (0xa6c + (x) * 0x40) #define UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL(x) (((x) & 0xFFFF) << 16) #define UPHY_USB3_PAD_ECTL4_RX_PI_CTRL(x) (((x) & 0x00FF) << 0) #define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6(x) (0xa74 + (x) * 0x40) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) struct padctl_softc { device_t dev; struct resource *mem_res; hwreset_t rst; int phy_ena_cnt; int pcie_ena_cnt; int sata_ena_cnt; /* Fuses calibration data */ /* USB2 */ uint32_t hs_curr_level[4]; uint32_t hs_curr_level_offs; /* Not inited yet, always 0 */ uint32_t hs_term_range_adj; uint32_t rpd_ctrl; /* HSIC */ uint32_t rx_strobe_trim; /* Not inited yet, always 0 */ uint32_t rx_data0_trim; /* Not inited yet, always 0 */ uint32_t rx_data1_trim; /* Not inited yet, always 0 */ uint32_t tx_rtune_p; /* Not inited yet, always 0 */ uint32_t strobe_trim; /* Not inited yet, always 0 */ }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra210-xusb-padctl", 1}, {NULL, 0}, }; /* Ports. */ enum padctl_port_type { PADCTL_PORT_USB2, PADCTL_PORT_HSIC, PADCTL_PORT_USB3, }; struct padctl_lane; struct padctl_port { enum padctl_port_type type; const char *name; const char *base_name; int idx; int (*init)(struct padctl_softc *sc, struct padctl_port *port); /* Runtime data. 
*/ phandle_t xref; bool enabled; bool internal; uint32_t companion; regulator_t supply_vbus; struct padctl_lane *lane; }; static int usb3_port_init(struct padctl_softc *sc, struct padctl_port *port); #define PORT(t, n, p, i) { \ .type = t, \ .name = n "-" #p, \ .base_name = n, \ .idx = p, \ .init = i, \ } static struct padctl_port ports_tbl[] = { PORT(PADCTL_PORT_USB2, "usb2", 0, NULL), PORT(PADCTL_PORT_USB2, "usb2", 1, NULL), PORT(PADCTL_PORT_USB2, "usb2", 2, NULL), PORT(PADCTL_PORT_USB2, "usb2", 3, NULL), PORT(PADCTL_PORT_HSIC, "hsic", 0, NULL), PORT(PADCTL_PORT_HSIC, "hsic", 1, NULL), PORT(PADCTL_PORT_USB3, "usb3", 0, usb3_port_init), PORT(PADCTL_PORT_USB3, "usb3", 1, usb3_port_init), }; /* Pads - a group of lannes. */ enum padctl_pad_type { PADCTL_PAD_USB2, PADCTL_PAD_HSIC, PADCTL_PAD_PCIE, PADCTL_PAD_SATA, }; struct padctl_lane; struct padctl_pad { const char *name; enum padctl_pad_type type; const char *clock_name; char *reset_name; /* XXX constify !!!!!! */ int (*enable)(struct padctl_softc *sc, struct padctl_lane *lane); int (*disable)(struct padctl_softc *sc, struct padctl_lane *lane); /* Runtime data. */ bool enabled; clk_t clk; hwreset_t reset; int nlanes; struct padctl_lane *lanes[8]; /* Safe maximum value. 
*/ }; static int usb2_enable(struct padctl_softc *sc, struct padctl_lane *lane); static int usb2_disable(struct padctl_softc *sc, struct padctl_lane *lane); static int hsic_enable(struct padctl_softc *sc, struct padctl_lane *lane); static int hsic_disable(struct padctl_softc *sc, struct padctl_lane *lane); static int pcie_enable(struct padctl_softc *sc, struct padctl_lane *lane); static int pcie_disable(struct padctl_softc *sc, struct padctl_lane *lane); static int sata_enable(struct padctl_softc *sc, struct padctl_lane *lane); static int sata_disable(struct padctl_softc *sc, struct padctl_lane *lane); #define PAD(n, t, cn, rn, e, d) { \ .name = n, \ .type = t, \ .clock_name = cn, \ .reset_name = rn, \ .enable = e, \ .disable = d, \ } static struct padctl_pad pads_tbl[] = { PAD("usb2", PADCTL_PAD_USB2, "trk", NULL, usb2_enable, usb2_disable), PAD("hsic", PADCTL_PAD_HSIC, "trk", NULL, hsic_enable, hsic_disable), PAD("pcie", PADCTL_PAD_PCIE, "pll", "phy", pcie_enable, pcie_disable), PAD("sata", PADCTL_PAD_SATA, "pll", "phy", sata_enable, sata_disable), }; /* Lanes. */ static char *usb_mux[] = {"snps", "xusb", "uart", "rsvd"}; static char *hsic_mux[] = {"snps", "xusb"}; static char *pci_mux[] = {"pcie-x1", "usb3-ss", "sata", "pcie-x4"}; struct padctl_lane { const char *name; int idx; bus_size_t reg; uint32_t shift; uint32_t mask; char **mux; int nmux; /* Runtime data. 
*/ bool enabled; phandle_t xref; struct padctl_pad *pad; struct padctl_port *port; int mux_idx; }; #define LANE(n, p, r, s, m, mx) { \ .name = n "-" #p, \ .idx = p, \ .reg = r, \ .shift = s, \ .mask = m, \ .mux = mx, \ .nmux = nitems(mx), \ } static struct padctl_lane lanes_tbl[] = { LANE("usb2", 0, XUSB_PADCTL_USB2_PAD_MUX, 0, 0x3, usb_mux), LANE("usb2", 1, XUSB_PADCTL_USB2_PAD_MUX, 2, 0x3, usb_mux), LANE("usb2", 2, XUSB_PADCTL_USB2_PAD_MUX, 4, 0x3, usb_mux), LANE("usb2", 3, XUSB_PADCTL_USB2_PAD_MUX, 6, 0x3, usb_mux), LANE("hsic", 0, XUSB_PADCTL_USB2_PAD_MUX, 14, 0x1, hsic_mux), LANE("hsic", 1, XUSB_PADCTL_USB2_PAD_MUX, 15, 0x1, hsic_mux), LANE("pcie", 0, XUSB_PADCTL_USB3_PAD_MUX, 12, 0x3, pci_mux), LANE("pcie", 1, XUSB_PADCTL_USB3_PAD_MUX, 14, 0x3, pci_mux), LANE("pcie", 2, XUSB_PADCTL_USB3_PAD_MUX, 16, 0x3, pci_mux), LANE("pcie", 3, XUSB_PADCTL_USB3_PAD_MUX, 18, 0x3, pci_mux), LANE("pcie", 4, XUSB_PADCTL_USB3_PAD_MUX, 20, 0x3, pci_mux), LANE("pcie", 5, XUSB_PADCTL_USB3_PAD_MUX, 22, 0x3, pci_mux), LANE("pcie", 6, XUSB_PADCTL_USB3_PAD_MUX, 24, 0x3, pci_mux), LANE("sata", 0, XUSB_PADCTL_USB3_PAD_MUX, 30, 0x3, pci_mux), }; /* Define all possible mappings for USB3 port lanes */ struct padctl_lane_map { int port_idx; enum padctl_pad_type pad_type; int lane_idx; }; #define LANE_MAP(pi, pt, li) { \ .port_idx = pi, \ .pad_type = pt, \ .lane_idx = li, \ } static struct padctl_lane_map lane_map_tbl[] = { LANE_MAP(0, PADCTL_PAD_PCIE, 6), /* port USB3-0 -> lane PCIE-0 */ LANE_MAP(1, PADCTL_PAD_PCIE, 5), /* port USB3-1 -> lane PCIE-1 */ LANE_MAP(2, PADCTL_PAD_PCIE, 0), /* port USB3-2 -> lane PCIE-0 */ LANE_MAP(2, PADCTL_PAD_PCIE, 2), /* port USB3-2 -> lane PCIE-2 */ LANE_MAP(3, PADCTL_PAD_PCIE, 4), /* port USB3-3 -> lane PCIE-4 */ }; /* Phy class and methods. 
*/ static int xusbpadctl_phy_enable(struct phynode *phy, bool enable); static phynode_method_t xusbpadctl_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, xusbpadctl_phy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(xusbpadctl_phynode, xusbpadctl_phynode_class, xusbpadctl_phynode_methods, 0, phynode_class); static struct padctl_port *search_lane_port(struct padctl_softc *sc, struct padctl_lane *lane); static void tegra210_xusb_pll_hw_control_enable(void) {} static void tegra210_xusb_pll_hw_sequence_start(void) {} static void tegra210_sata_pll_hw_control_enable(void) {} static void tegra210_sata_pll_hw_sequence_start(void) {} /* ------------------------------------------------------------------------- * * PEX functions */ static int uphy_pex_enable(struct padctl_softc *sc, struct padctl_pad *pad) { uint32_t reg; int rv, i; if (sc->pcie_ena_cnt > 0) { sc->pcie_ena_cnt++; return (0); } /* 22.8.4 UPHY PLLs, Step 4, page 1346 */ /* 1. Deassert PLL/Lane resets. */ rv = clk_enable(pad->clk); if (rv < 0) { device_printf(sc->dev, "Cannot enable clock for pad '%s': %d\n", pad->name, rv); return (rv); } rv = hwreset_deassert(pad->reset); if (rv < 0) { device_printf(sc->dev, "Cannot unreset pad '%s': %d\n", pad->name, rv); clk_disable(pad->clk); return (rv); } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg &= ~UPHY_PLL_P0_CTL2_PLL0_CAL_CTRL(~0); reg |= UPHY_PLL_P0_CTL2_PLL0_CAL_CTRL(0x136); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL5); reg &= ~UPHY_PLL_P0_CTL5_PLL0_DCO_CTRL(~0); reg |= UPHY_PLL_P0_CTL5_PLL0_DCO_CTRL(0x2a); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL5, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg |= UPHY_PLL_P0_CTL1_PLL0_PWR_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg |= UPHY_PLL_P0_CTL2_PLL0_CAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); reg |= UPHY_PLL_P0_CTL8_PLL0_RCAL_OVRD; WR4(sc, 
XUSB_PADCTL_UPHY_PLL_P0_CTL8, reg); /* * 2. For the following registers, default values * take care of the desired frequency. */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL4); reg &= ~UPHY_PLL_P0_CTL4_PLL0_TXCLKREF_SEL(~0); reg &= ~UPHY_PLL_P0_CTL4_PLL0_REFCLK_SEL(~0); reg |= UPHY_PLL_P0_CTL4_PLL0_TXCLKREF_SEL(0x2); reg |= UPHY_PLL_P0_CTL4_PLL0_TXCLKREF_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL4, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg &= ~UPHY_PLL_P0_CTL1_PLL0_FREQ_MDIV(~0); reg &= ~UPHY_PLL_P0_CTL1_PLL0_FREQ_NDIV(~0); reg |= UPHY_PLL_P0_CTL1_PLL0_FREQ_NDIV(0x19); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg &= ~UPHY_PLL_P0_CTL1_PLL0_IDDQ; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg &= ~UPHY_PLL_P0_CTL1_PLL0_SLEEP(~0); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); /* 3. Wait 100 ns. */ DELAY(10); /* XXX This in not in TRM */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL4); reg |= UPHY_PLL_P0_CTL4_PLL0_REFCLKBUF_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL4, reg); /* 4. Calibration. */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg |= UPHY_PLL_P0_CTL2_PLL0_CAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); for (i = 30; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); if (reg & UPHY_PLL_P0_CTL2_PLL0_CAL_DONE) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in calibration step 1 " "for pad '%s' (0x%08X).\n", pad->name, reg); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg &= ~UPHY_PLL_P0_CTL2_PLL0_CAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); if ((reg & UPHY_PLL_P0_CTL2_PLL0_CAL_DONE) == 0) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in calibration step 2 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } /* 5. 
Enable the PLL (20 μs Lock time) */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg |= UPHY_PLL_P0_CTL1_PLL0_ENABLE; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); if (reg & UPHY_PLL_P0_CTL1_PLL0_LOCKDET_STATUS) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout while enabling PLL " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } /* 6. RCAL. */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); reg |= UPHY_PLL_P0_CTL8_PLL0_RCAL_EN; reg |= UPHY_PLL_P0_CTL8_PLL0_RCAL_CLK_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); if (reg & UPHY_PLL_P0_CTL8_PLL0_RCAL_DONE) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in RX calibration step 1 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); reg &= ~UPHY_PLL_P0_CTL8_PLL0_RCAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); if (!(reg & UPHY_PLL_P0_CTL8_PLL0_RCAL_DONE)) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in RX calibration step 2 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); reg &= ~UPHY_PLL_P0_CTL8_PLL0_RCAL_CLK_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8, reg); /* Enable Hardware Power Sequencer. 
*/ tegra210_xusb_pll_hw_control_enable(); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1); reg &= ~UPHY_PLL_P0_CTL1_PLL0_PWR_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg &= ~UPHY_PLL_P0_CTL2_PLL0_CAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8); reg &= ~UPHY_PLL_P0_CTL8_PLL0_RCAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL8, reg); DELAY(50); tegra210_xusb_pll_hw_sequence_start(); sc->pcie_ena_cnt++; return (0); err: hwreset_deassert(pad->reset); clk_disable(pad->clk); return (rv); } static void uphy_pex_disable(struct padctl_softc *sc, struct padctl_pad *pad) { int rv; sc->pcie_ena_cnt--; if (sc->pcie_ena_cnt <= 0) { rv = hwreset_assert(pad->reset); if (rv != 0) { device_printf(sc->dev, "Cannot reset pad '%s': %d\n", pad->name, rv); } rv = clk_disable(pad->clk); if (rv != 0) { device_printf(sc->dev, "Cannot dicable clock for pad '%s': %d\n", pad->name, rv); } } } static int uphy_sata_enable(struct padctl_softc *sc, struct padctl_pad *pad, bool usb) { uint32_t reg; int rv, i; /* 22.8.4 UPHY PLLs, Step 4, page 1346 */ /* 1. Deassert PLL/Lane resets. 
*/ if (sc->sata_ena_cnt > 0) { sc->sata_ena_cnt++; return (0); } rv = clk_enable(pad->clk); if (rv < 0) { device_printf(sc->dev, "Cannot enable clock for pad '%s': %d\n", pad->name, rv); return (rv); } rv = hwreset_deassert(pad->reset); if (rv < 0) { device_printf(sc->dev, "Cannot unreset pad '%s': %d\n", pad->name, rv); clk_disable(pad->clk); return (rv); } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2); reg &= ~UPHY_PLL_P0_CTL2_PLL0_CAL_CTRL(~0); reg |= UPHY_PLL_P0_CTL2_PLL0_CAL_CTRL(0x136); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL5); reg &= ~UPHY_PLL_P0_CTL5_PLL0_DCO_CTRL(~0); reg |= UPHY_PLL_P0_CTL5_PLL0_DCO_CTRL(0x2a); WR4(sc, XUSB_PADCTL_UPHY_PLL_P0_CTL5, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg |= UPHY_PLL_S0_CTL1_PLL0_PWR_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); reg |= UPHY_PLL_S0_CTL2_PLL0_CAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); reg |= UPHY_PLL_S0_CTL8_PLL0_RCAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8, reg); /* * 2. For the following registers, default values * take care of the desired frequency. 
*/ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL4); reg &= ~UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_SEL(~0); reg &= ~UPHY_PLL_S0_CTL4_PLL0_REFCLK_SEL(~0); reg |= UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_EN; if (usb) reg |= UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_SEL(0x2); else reg |= UPHY_PLL_S0_CTL4_PLL0_TXCLKREF_SEL(0x0); /* XXX PLL0_XDIGCLK_EN */ /* value &= ~(1 << 19); WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL4, reg); */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg &= ~UPHY_PLL_S0_CTL1_PLL0_FREQ_MDIV(~0); reg &= ~UPHY_PLL_S0_CTL1_PLL0_FREQ_NDIV(~0); if (usb) reg |= UPHY_PLL_S0_CTL1_PLL0_FREQ_NDIV(0x19); else reg |= UPHY_PLL_S0_CTL1_PLL0_FREQ_NDIV(0x1e); WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg &= ~UPHY_PLL_S0_CTL1_PLL0_IDDQ; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg &= ~UPHY_PLL_S0_CTL1_PLL0_SLEEP(~0); WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); /* 3. Wait 100 ns. */ DELAY(1); /* XXX This in not in TRM */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL4); reg |= UPHY_PLL_S0_CTL4_PLL0_REFCLKBUF_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL4, reg); /* 4. Calibration. */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); reg |= UPHY_PLL_S0_CTL2_PLL0_CAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2, reg); for (i = 30; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); if (reg & UPHY_PLL_S0_CTL2_PLL0_CAL_DONE) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in calibration step 1 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); reg &= ~UPHY_PLL_S0_CTL2_PLL0_CAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); if ((reg & UPHY_PLL_S0_CTL2_PLL0_CAL_DONE) == 0) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in calibration step 2 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } /* 5. 
Enable the PLL (20 μs Lock time) */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg |= UPHY_PLL_S0_CTL1_PLL0_ENABLE; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); if (reg & UPHY_PLL_S0_CTL1_PLL0_LOCKDET_STATUS) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout while enabling PLL " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } /* 6. RCAL. */ reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); reg |= UPHY_PLL_S0_CTL8_PLL0_RCAL_EN; reg |= UPHY_PLL_S0_CTL8_PLL0_RCAL_CLK_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); if (reg & UPHY_PLL_S0_CTL8_PLL0_RCAL_DONE) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in RX calibration step 1 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); reg &= ~UPHY_PLL_S0_CTL8_PLL0_RCAL_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8, reg); for (i = 10; i > 0; i--) { reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); if (!(reg & UPHY_PLL_S0_CTL8_PLL0_RCAL_DONE)) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timedout in RX calibration step 2 " "for pad '%s'.\n", pad->name); rv = ETIMEDOUT; goto err; } reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); reg &= ~UPHY_PLL_S0_CTL8_PLL0_RCAL_CLK_EN; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8, reg); /* Enable Hardware Power Sequencer. 
*/ tegra210_sata_pll_hw_control_enable(); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1); reg &= ~UPHY_PLL_S0_CTL1_PLL0_PWR_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2); reg &= ~UPHY_PLL_S0_CTL2_PLL0_CAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL2, reg); reg = RD4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8); reg &= ~UPHY_PLL_S0_CTL8_PLL0_RCAL_OVRD; WR4(sc, XUSB_PADCTL_UPHY_PLL_S0_CTL8, reg); DELAY(50); tegra210_sata_pll_hw_sequence_start(); sc->sata_ena_cnt++; return (0); err: hwreset_deassert(pad->reset); clk_disable(pad->clk); return (rv); } static void uphy_sata_disable(struct padctl_softc *sc, struct padctl_pad *pad) { int rv; sc->sata_ena_cnt--; if (sc->sata_ena_cnt <= 0) { rv = hwreset_assert(pad->reset); if (rv != 0) { device_printf(sc->dev, "Cannot reset pad '%s': %d\n", pad->name, rv); } rv = clk_disable(pad->clk); if (rv != 0) { device_printf(sc->dev, "Cannot dicable clock for pad '%s': %d\n", pad->name, rv); } } } static int usb3_port_init(struct padctl_softc *sc, struct padctl_port *port) { uint32_t reg; struct padctl_pad *pad; int rv; pad = port->lane->pad; reg = RD4(sc, XUSB_PADCTL_SS_PORT_MAP); if (port->internal) reg &= ~SS_PORT_MAP_PORT_INTERNAL(port->idx); else reg |= SS_PORT_MAP_PORT_INTERNAL(port->idx); reg &= ~SS_PORT_MAP_PORT_MAP(port->idx, ~0); reg |= SS_PORT_MAP_PORT_MAP(port->idx, port->companion); WR4(sc, XUSB_PADCTL_SS_PORT_MAP, reg); if (port->supply_vbus != NULL) { rv = regulator_enable(port->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot enable vbus regulator\n"); return (rv); } } reg = RD4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL1(port->idx)); reg &= ~UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL(~0); reg |= UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL(2); WR4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL1(port->idx), reg); reg = RD4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL2(port->idx)); reg &= ~UPHY_USB3_PAD_ECTL2_RX_CTLE(~0); reg |= UPHY_USB3_PAD_ECTL2_RX_CTLE(0x00fc); WR4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL2(port->idx), reg); 
WR4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3(port->idx), 0xc0077f1f); reg = RD4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL4(port->idx)); reg &= ~UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL(~0); reg |= UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL(0x01c7); WR4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL4(port->idx), reg); WR4(sc, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6(port->idx), 0xfcf01368); if (pad->type == PADCTL_PAD_SATA) rv = uphy_sata_enable(sc, pad, true); else rv = uphy_pex_enable(sc, pad); if (rv != 0) return (rv); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_SSP_ELPG_VCORE_DOWN(port->idx); WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_SSP_ELPG_CLAMP_EN_EARLY(port->idx); WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_SSP_ELPG_CLAMP_EN(port->idx); WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); return (0); } static int pcie_enable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; int rv; rv = uphy_pex_enable(sc, lane->pad); if (rv != 0) return (rv); reg = RD4(sc, XUSB_PADCTL_USB3_PAD_MUX); reg |= USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->idx); WR4(sc, XUSB_PADCTL_USB3_PAD_MUX, reg); return (0); } static int pcie_disable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; reg = RD4(sc, XUSB_PADCTL_USB3_PAD_MUX); reg &= ~USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->idx); WR4(sc, XUSB_PADCTL_USB3_PAD_MUX, reg); uphy_pex_disable(sc, lane->pad); return (0); } static int sata_enable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; int rv; rv = uphy_sata_enable(sc, lane->pad, false); if (rv != 0) return (rv); reg = RD4(sc, XUSB_PADCTL_USB3_PAD_MUX); reg |= USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->idx); WR4(sc, XUSB_PADCTL_USB3_PAD_MUX, reg); return (0); } static int sata_disable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; reg = RD4(sc, XUSB_PADCTL_USB3_PAD_MUX); reg &= 
~USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->idx); WR4(sc, XUSB_PADCTL_USB3_PAD_MUX, reg); uphy_sata_disable(sc, lane->pad); return (0); } static int hsic_enable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; struct padctl_pad *pad; struct padctl_port *port; int rv; port = search_lane_port(sc, lane); if (port == NULL) { device_printf(sc->dev, "Cannot find port for lane: %s\n", lane->name); } pad = lane->pad; if (port->supply_vbus != NULL) { rv = regulator_enable(port->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot enable vbus regulator\n"); return (rv); } } WR4(sc, XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL, sc->strobe_trim); reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_CTL1(lane->idx)); reg &= ~HSIC_PAD_CTL1_TX_RTUNEP(~0); reg |= HSIC_PAD_CTL1_TX_RTUNEP(sc->tx_rtune_p); WR4(sc, XUSB_PADCTL_HSIC_PAD_CTL1(lane->idx), reg); reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_CTL2(lane->idx)); reg &= ~HSIC_PAD_CTL2_RX_STROBE_TRIM(~0); reg &= ~HSIC_PAD_CTL2_RX_DATA1_TRIM(~0); reg &= ~HSIC_PAD_CTL2_RX_DATA0_TRIM(~0); reg |= HSIC_PAD_CTL2_RX_STROBE_TRIM(sc->rx_strobe_trim); reg |= HSIC_PAD_CTL2_RX_DATA1_TRIM(sc->rx_data1_trim); reg |= HSIC_PAD_CTL2_RX_DATA0_TRIM(sc->rx_data0_trim); WR4(sc, XUSB_PADCTL_HSIC_PAD_CTL2(lane->idx), reg); reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_CTL0(lane->idx)); reg &= ~HSIC_PAD_CTL0_RPU_DATA0; reg &= ~HSIC_PAD_CTL0_RPU_DATA1; reg &= ~HSIC_PAD_CTL0_RPU_STROBE; reg &= ~HSIC_PAD_CTL0_PD_RX_DATA0; reg &= ~HSIC_PAD_CTL0_PD_RX_DATA1; reg &= ~HSIC_PAD_CTL0_PD_RX_STROBE; reg &= ~HSIC_PAD_CTL0_PD_ZI_DATA0; reg &= ~HSIC_PAD_CTL0_PD_ZI_DATA1; reg &= ~HSIC_PAD_CTL0_PD_ZI_STROBE; reg &= ~HSIC_PAD_CTL0_PD_TX_DATA0; reg &= ~HSIC_PAD_CTL0_PD_TX_DATA1; reg &= ~HSIC_PAD_CTL0_PD_TX_STROBE; reg |= HSIC_PAD_CTL0_RPD_DATA0; reg |= HSIC_PAD_CTL0_RPD_DATA1; reg |= HSIC_PAD_CTL0_RPD_STROBE; WR4(sc, XUSB_PADCTL_HSIC_PAD_CTL0(lane->idx), reg); rv = clk_enable(pad->clk); if (rv < 0) { device_printf(sc->dev, "Cannot enable clock for pad '%s': %d\n", pad->name, rv); if 
(port->supply_vbus != NULL) regulator_disable(port->supply_vbus); return (rv); } reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_TRK_CTL); reg &= ~HSIC_PAD_TRK_CTL_TRK_START_TIMER(~0); reg &= ~HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER(~0); reg |= HSIC_PAD_TRK_CTL_TRK_START_TIMER(0x1e); reg |= HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER(0x0a); WR4(sc, XUSB_PADCTL_HSIC_PAD_TRK_CTL, reg); DELAY(10); reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_TRK_CTL); reg &= ~HSIC_PAD_TRK_CTL_PD_TRK; WR4(sc, XUSB_PADCTL_HSIC_PAD_TRK_CTL, reg); DELAY(50); clk_disable(pad->clk); return (0); } static int hsic_disable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; struct padctl_port *port; int rv; port = search_lane_port(sc, lane); if (port == NULL) { device_printf(sc->dev, "Cannot find port for lane: %s\n", lane->name); } reg = RD4(sc, XUSB_PADCTL_HSIC_PAD_CTL0(lane->idx)); reg |= HSIC_PAD_CTL0_PD_RX_DATA0; reg |= HSIC_PAD_CTL0_PD_RX_DATA1; reg |= HSIC_PAD_CTL0_PD_RX_STROBE; reg |= HSIC_PAD_CTL0_PD_ZI_DATA0; reg |= HSIC_PAD_CTL0_PD_ZI_DATA1; reg |= HSIC_PAD_CTL0_PD_ZI_STROBE; reg |= HSIC_PAD_CTL0_PD_TX_DATA0; reg |= HSIC_PAD_CTL0_PD_TX_DATA1; reg |= HSIC_PAD_CTL0_PD_TX_STROBE; WR4(sc, XUSB_PADCTL_HSIC_PAD_CTL1(lane->idx), reg); if (port->supply_vbus != NULL) { rv = regulator_disable(port->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot disable vbus regulator\n"); return (rv); } } return (0); } static int usb2_enable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; struct padctl_pad *pad; struct padctl_port *port; int rv; port = search_lane_port(sc, lane); if (port == NULL) { device_printf(sc->dev, "Cannot find port for lane: %s\n", lane->name); } pad = lane->pad; reg = RD4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL0); reg &= ~USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL(~0); reg &= ~USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL(~0); reg |= USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL(0x7); WR4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL0, reg); reg = RD4(sc, XUSB_PADCTL_USB2_PORT_CAP); reg &= 
~USB2_PORT_CAP_PORT_CAP(lane->idx, ~0); reg |= USB2_PORT_CAP_PORT_CAP(lane->idx, USB2_PORT_CAP_PORT_CAP_HOST); WR4(sc, XUSB_PADCTL_USB2_PORT_CAP, reg); reg = RD4(sc, XUSB_PADCTL_USB2_OTG_PAD_CTL0(lane->idx)); reg &= ~USB2_OTG_PAD_CTL0_HS_CURR_LEVEL(~0); reg &= ~USB2_OTG_PAD_CTL0_HS_SLEW(~0); reg &= ~USB2_OTG_PAD_CTL0_PD; reg &= ~USB2_OTG_PAD_CTL0_PD2; reg &= ~USB2_OTG_PAD_CTL0_PD_ZI; reg |= USB2_OTG_PAD_CTL0_HS_SLEW(14); reg |= USB2_OTG_PAD_CTL0_HS_CURR_LEVEL(sc->hs_curr_level[lane->idx] + sc->hs_curr_level_offs); WR4(sc, XUSB_PADCTL_USB2_OTG_PAD_CTL0(lane->idx), reg); reg = RD4(sc, XUSB_PADCTL_USB2_OTG_PAD_CTL1(lane->idx)); reg &= ~USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ(~0); reg &= ~USB2_OTG_PAD_CTL1_RPD_CTRL(~0); reg &= ~USB2_OTG_PAD_CTL1_PD_DR; reg &= ~USB2_OTG_PAD_CTL1_PD_CHRP_OVRD; reg &= ~USB2_OTG_PAD_CTL1_PD_DISC_OVRD; reg |= USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ(sc->hs_term_range_adj); reg |= USB2_OTG_PAD_CTL1_RPD_CTRL(sc->rpd_ctrl); WR4(sc, XUSB_PADCTL_USB2_OTG_PAD_CTL1(lane->idx), reg); reg = RD4(sc, XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1(lane->idx)); reg &= ~USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV(~0); reg |= USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18; WR4(sc, XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1(lane->idx), reg); if (port->supply_vbus != NULL) { rv = regulator_enable(port->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot enable vbus regulator\n"); return (rv); } } rv = clk_enable(pad->clk); if (rv < 0) { device_printf(sc->dev, "Cannot enable clock for pad '%s': %d\n", pad->name, rv); if (port->supply_vbus != NULL) regulator_disable(port->supply_vbus); return (rv); } reg = RD4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL1); reg &= ~USB2_BIAS_PAD_CTL1_TRK_START_TIMER(~0); reg &= ~USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER(~0); reg |= USB2_BIAS_PAD_CTL1_TRK_START_TIMER(0x1e); reg |= USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER(0x0a); WR4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL1, reg); reg = RD4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL0); reg &= ~USB2_BIAS_PAD_CTL0_PD; WR4(sc, 
XUSB_PADCTL_USB2_BIAS_PAD_CTL0, reg); return (0); } static int usb2_disable(struct padctl_softc *sc, struct padctl_lane *lane) { uint32_t reg; struct padctl_pad *pad; struct padctl_port *port; int rv; port = search_lane_port(sc, lane); if (port == NULL) { device_printf(sc->dev, "Cannot find port for lane: %s\n", lane->name); } pad = lane->pad; reg = RD4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL0); reg |= USB2_BIAS_PAD_CTL0_PD; WR4(sc, XUSB_PADCTL_USB2_BIAS_PAD_CTL0, reg); if (port->supply_vbus != NULL) { rv = regulator_disable(port->supply_vbus); if (rv != 0) { device_printf(sc->dev, "Cannot disable vbus regulator\n"); return (rv); } } rv = clk_disable(pad->clk); if (rv < 0) { device_printf(sc->dev, "Cannot disable clock for pad '%s': %d\n", pad->name, rv); return (rv); } return (0); } static int pad_common_enable(struct padctl_softc *sc) { uint32_t reg; reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg &= ~ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); return (0); } static int pad_common_disable(struct padctl_softc *sc) { uint32_t reg; reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg |= ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg |= ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); reg = RD4(sc, XUSB_PADCTL_ELPG_PROGRAM1); reg |= ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN; WR4(sc, XUSB_PADCTL_ELPG_PROGRAM1, reg); DELAY(100); return (0); } static int xusbpadctl_phy_enable(struct phynode *phy, bool enable) { device_t dev; intptr_t id; struct padctl_softc *sc; struct padctl_lane *lane; struct padctl_pad *pad; int rv; dev = 
phynode_get_device(phy); id = phynode_get_id(phy); sc = device_get_softc(dev); if (id < 0 || id >= nitems(lanes_tbl)) { device_printf(dev, "Unknown phy: %d\n", (int)id); return (ENXIO); } lane = lanes_tbl + id; if (!lane->enabled) { device_printf(dev, "Lane is not enabled/configured: %s\n", lane->name); return (ENXIO); } pad = lane->pad; if (enable) { if (sc->phy_ena_cnt == 0) { rv = pad_common_enable(sc); if (rv != 0) return (rv); } sc->phy_ena_cnt++; } if (enable) rv = pad->enable(sc, lane); else rv = pad->disable(sc, lane); if (rv != 0) return (rv); if (!enable) { if (sc->phy_ena_cnt == 1) { rv = pad_common_disable(sc); if (rv != 0) return (rv); } sc->phy_ena_cnt--; } return (0); } /* ------------------------------------------------------------------------- * * FDT processing */ static struct padctl_port * search_port(struct padctl_softc *sc, char *port_name) { int i; for (i = 0; i < nitems(ports_tbl); i++) { if (strcmp(port_name, ports_tbl[i].name) == 0) return (&ports_tbl[i]); } return (NULL); } static struct padctl_port * search_lane_port(struct padctl_softc *sc, struct padctl_lane *lane) { int i; for (i = 0; i < nitems(ports_tbl); i++) { if (!ports_tbl[i].enabled) continue; if (ports_tbl[i].lane == lane) return (ports_tbl + i); } return (NULL); } static struct padctl_lane * search_lane(struct padctl_softc *sc, char *lane_name) { int i; for (i = 0; i < nitems(lanes_tbl); i++) { if (strcmp(lane_name, lanes_tbl[i].name) == 0) return (lanes_tbl + i); } return (NULL); } static struct padctl_lane * search_pad_lane(struct padctl_softc *sc, enum padctl_pad_type type, int idx) { int i; for (i = 0; i < nitems(lanes_tbl); i++) { if (!lanes_tbl[i].enabled) continue; if (type == lanes_tbl[i].pad->type && idx == lanes_tbl[i].idx) return (lanes_tbl + i); } return (NULL); } static struct padctl_lane * search_usb3_pad_lane(struct padctl_softc *sc, int idx) { int i; struct padctl_lane *lane, *tmp; lane = NULL; for (i = 0; i < nitems(lane_map_tbl); i++) { if (idx != 
lane_map_tbl[i].port_idx)
			continue;
		tmp = search_pad_lane(sc, lane_map_tbl[i].pad_type,
		    lane_map_tbl[i].lane_idx);
		if (tmp == NULL)
			continue;
		/* Only a lane muxed to the "usb3-ss" function qualifies. */
		if (strcmp(tmp->mux[tmp->mux_idx], "usb3-ss") != 0)
			continue;
		if (lane != NULL) {
			device_printf(sc->dev, "Duplicated mappings found for"
			    " lanes: %s and %s\n", lane->name, tmp->name);
			return (NULL);
		}
		lane = tmp;
	}
	return (lane);
}

/* Find a pad table entry by its node name. */
static struct padctl_pad *
search_pad(struct padctl_softc *sc, char *pad_name)
{
	int i;

	for (i = 0; i < nitems(pads_tbl); i++) {
		if (strcmp(pad_name, pads_tbl[i].name) == 0)
			return (pads_tbl + i);
	}
	return (NULL);
}

/*
 * Return the index of the named mux function for a lane, or -1 when the
 * lane cannot be routed to that function.
 */
static int
search_mux(struct padctl_softc *sc, struct padctl_lane *lane, char *fnc_name)
{
	int i;

	for (i = 0; i < lane->nmux; i++) {
		if (strcmp(fnc_name, lane->mux[i]) == 0)
			return (i);
	}
	return (-1);
}

/* Program the lane's mux register field with its selected function index. */
static int
config_lane(struct padctl_softc *sc, struct padctl_lane *lane)
{
	uint32_t reg;

	reg = RD4(sc, lane->reg);
	reg &= ~(lane->mask << lane->shift);
	reg |= (lane->mux_idx & lane->mask) << lane->shift;
	WR4(sc, lane->reg, reg);
	return (0);
}

/*
 * Parse one lane subnode: resolve the lane by name, program its mux
 * function, and create/register a phy node for it.
 */
static int
process_lane(struct padctl_softc *sc, phandle_t node, struct padctl_pad *pad)
{
	struct padctl_lane *lane;
	struct phynode *phynode;
	struct phynode_init_def phy_init;
	char *name;
	char *function;
	int rv;

	name = NULL;
	function = NULL;
	rv = OF_getprop_alloc(node, "name", (void **)&name);
	if (rv <= 0) {
		device_printf(sc->dev, "Cannot read lane name.\n");
		return (ENXIO);
	}

	lane = search_lane(sc, name);
	if (lane == NULL) {
		device_printf(sc->dev, "Unknown lane: %s\n", name);
		rv = ENXIO;
		goto end;
	}

	/* Read function (mux) settings.
*/ rv = OF_getprop_alloc(node, "nvidia,function", (void **)&function); if (rv <= 0) { device_printf(sc->dev, "Cannot read lane function.\n"); rv = ENXIO; goto end; } lane->mux_idx = search_mux(sc, lane, function); if (lane->mux_idx == ~0) { device_printf(sc->dev, "Unknown function %s for lane %s\n", function, name); rv = ENXIO; goto end; } rv = config_lane(sc, lane); if (rv != 0) { device_printf(sc->dev, "Cannot configure lane: %s: %d\n", name, rv); rv = ENXIO; goto end; } lane->xref = OF_xref_from_node(node); lane->pad = pad; lane->enabled = true; pad->lanes[pad->nlanes++] = lane; /* Create and register phy. */ bzero(&phy_init, sizeof(phy_init)); phy_init.id = lane - lanes_tbl; phy_init.ofw_node = node; phynode = phynode_create(sc->dev, &xusbpadctl_phynode_class, &phy_init); if (phynode == NULL) { device_printf(sc->dev, "Cannot create phy\n"); rv = ENXIO; goto end; } if (phynode_register(phynode) == NULL) { device_printf(sc->dev, "Cannot create phy\n"); return (ENXIO); } rv = 0; end: if (name != NULL) OF_prop_free(name); if (function != NULL) OF_prop_free(function); return (rv); } static int process_pad(struct padctl_softc *sc, phandle_t node) { phandle_t xref; struct padctl_pad *pad; char *name; int rv; name = NULL; rv = OF_getprop_alloc(node, "name", (void **)&name); if (rv <= 0) { device_printf(sc->dev, "Cannot read pad name.\n"); return (ENXIO); } pad = search_pad(sc, name); if (pad == NULL) { device_printf(sc->dev, "Unknown pad: %s\n", name); rv = ENXIO; goto end; } if (pad->clock_name != NULL) { rv = clk_get_by_ofw_name(sc->dev, node, pad->clock_name, &pad->clk); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' clock\n", pad->clock_name); return (ENXIO); } } if (pad->reset_name != NULL) { rv = hwreset_get_by_ofw_name(sc->dev, node, pad->reset_name, &pad->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' reset\n", pad->reset_name); return (ENXIO); } } /* Read and process associated lanes. 
*/
	node = ofw_bus_find_child(node, "lanes");
	if (node <= 0) {
		device_printf(sc->dev, "Cannot find 'lanes' subnode\n");
		rv = ENXIO;
		goto end;
	}

	for (node = OF_child(node); node != 0; node = OF_peer(node)) {
		if (!ofw_bus_node_status_okay(node))
			continue;
		rv = process_lane(sc, node, pad);
		if (rv != 0)
			goto end;
		/* Make the lane's OF node resolvable to this device. */
		xref = OF_xref_from_node(node);
		OF_device_register_xref(xref, sc->dev);
	}
	pad->enabled = true;
	rv = 0;
end:
	if (name != NULL)
		OF_prop_free(name);
	return (rv);
}

/*
 * Parse one port subnode: resolve the port by name, pick up its optional
 * VBUS regulator and flags, and bind it to its lane.
 */
static int
process_port(struct padctl_softc *sc, phandle_t node)
{
	struct padctl_port *port;
	char *name;
	int rv;

	name = NULL;
	rv = OF_getprop_alloc(node, "name", (void **)&name);
	if (rv <= 0) {
		device_printf(sc->dev, "Cannot read port name.\n");
		return (ENXIO);
	}

	port = search_port(sc, name);
	if (port == NULL) {
		device_printf(sc->dev, "Unknown port: %s\n", name);
		rv = ENXIO;
		goto end;
	}

	/* The VBUS regulator is optional; its absence is not an error. */
	regulator_get_by_ofw_property(sc->dev, node, "vbus-supply",
	    &port->supply_vbus);

	if (OF_hasprop(node, "nvidia,internal"))
		port->internal = true;

	/* Find assigned lane */
	if (port->lane == NULL) {
		switch(port->type) {
		/* Routing is fixed for USB2 AND HSIC.
*/ case PADCTL_PORT_USB2: port->lane = search_pad_lane(sc, PADCTL_PAD_USB2, port->idx); break; case PADCTL_PORT_HSIC: port->lane = search_pad_lane(sc, PADCTL_PAD_HSIC, port->idx); break; case PADCTL_PORT_USB3: port->lane = search_usb3_pad_lane(sc, port->idx); break; } } if (port->lane == NULL) { device_printf(sc->dev, "Cannot find lane for port: %s\n", name); rv = ENXIO; goto end; } if (port->type == PADCTL_PORT_USB3) { rv = OF_getencprop(node, "nvidia,usb2-companion", &(port->companion), sizeof(port->companion)); if (rv <= 0) { device_printf(sc->dev, "Missing 'nvidia,usb2-companion' property " "for port: %s\n", name); rv = ENXIO; goto end; } } port->enabled = true; rv = 0; end: if (name != NULL) OF_prop_free(name); return (rv); } static int parse_fdt(struct padctl_softc *sc, phandle_t base_node) { phandle_t node; int rv; rv = 0; node = ofw_bus_find_child(base_node, "pads"); if (node <= 0) { device_printf(sc->dev, "Cannot find pads subnode.\n"); return (ENXIO); } for (node = OF_child(node); node != 0; node = OF_peer(node)) { if (!ofw_bus_node_status_okay(node)) continue; rv = process_pad(sc, node); if (rv != 0) return (rv); } node = ofw_bus_find_child(base_node, "ports"); if (node <= 0) { device_printf(sc->dev, "Cannot find ports subnode.\n"); return (ENXIO); } for (node = OF_child(node); node != 0; node = OF_peer(node)) { if (!ofw_bus_node_status_okay(node)) continue; rv = process_port(sc, node); if (rv != 0) return (rv); } return (0); } static void load_calibration(struct padctl_softc *sc) { uint32_t reg; int i; reg = tegra_fuse_read_4(FUSE_SKU_CALIB_0); sc->hs_curr_level[0] = FUSE_SKU_CALIB_0_HS_CURR_LEVEL_0(reg); for (i = 1; i < nitems(sc->hs_curr_level); i++) { sc->hs_curr_level[i] = FUSE_SKU_CALIB_0_HS_CURR_LEVEL_123(reg, i); } sc->hs_term_range_adj = FUSE_SKU_CALIB_0_HS_TERM_RANGE_ADJ(reg); tegra_fuse_read_4(FUSE_USB_CALIB_EXT_0); sc->rpd_ctrl = FUSE_USB_CALIB_EXT_0_RPD_CTRL(reg); } /* ------------------------------------------------------------------------- 
 *
 * BUS functions
 */

static int
xusbpadctl_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Tegra XUSB phy");
	return (BUS_PROBE_DEFAULT);
}

static int
xusbpadctl_detach(device_t dev)
{

	/* This device is always present. */
	return (EBUSY);
}

/*
 * Attach: map registers, release the pad controller from reset, load fuse
 * calibration, parse the FDT pads/ports description, and run every
 * configured port's init hook.
 * NOTE(review): the error paths return without releasing mem_res or the
 * reset handle - confirm whether that leaks on a failed attach.
 */
static int
xusbpadctl_attach(device_t dev)
{
	struct padctl_softc * sc;
	int i, rid, rv;
	struct padctl_port *port;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot allocate memory resources\n");
		return (ENXIO);
	}

	rv = hwreset_get_by_ofw_name(dev, 0, "padctl", &sc->rst);
	if (rv != 0) {
		device_printf(dev, "Cannot get 'padctl' reset: %d\n", rv);
		return (rv);
	}
	rv = hwreset_deassert(sc->rst);
	if (rv != 0) {
		device_printf(dev, "Cannot unreset 'padctl' reset: %d\n", rv);
		return (rv);
	}

	load_calibration(sc);

	rv = parse_fdt(sc, node);
	if (rv != 0) {
		device_printf(dev, "Cannot parse fdt configuration: %d\n",
		    rv);
		return (rv);
	}

	/* Run the per-port init hook for every configured port. */
	for (i = 0; i < nitems(ports_tbl); i++) {
		port = ports_tbl + i;
		if (!port->enabled)
			continue;
		if (port->init == NULL)
			continue;
		rv = port->init(sc, port);
		if (rv != 0) {
			device_printf(dev, "Cannot init port '%s'\n",
			    port->name);
			return (rv);
		}
	}
	return (0);
}

static device_method_t tegra_xusbpadctl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xusbpadctl_probe),
	DEVMETHOD(device_attach,	xusbpadctl_attach),
	DEVMETHOD(device_detach,	xusbpadctl_detach),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(xusbpadctl, tegra_xusbpadctl_driver,
    tegra_xusbpadctl_methods, sizeof(struct padctl_softc));
EARLY_DRIVER_MODULE(tegra_xusbpadctl, simplebus, tegra_xusbpadctl_driver,
    NULL, NULL, 73);
diff --git a/sys/arm64/qoriq/clk/ls1028a_clkgen.c b/sys/arm64/qoriq/clk/ls1028a_clkgen.c
index a003d366267b..9c4dafed4e59 100644
---
a/sys/arm64/qoriq/clk/ls1028a_clkgen.c +++ b/sys/arm64/qoriq/clk/ls1028a_clkgen.c @@ -1,298 +1,298 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include static uint8_t ls1028a_pltfrm_pll_divs[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0 }; static struct qoriq_clk_pll_def ls1028a_pltfrm_pll = { .clkdef = { .name = "ls1028a_platform_pll", .id = QORIQ_CLK_ID(QORIQ_TYPE_PLATFORM_PLL, 0), .flags = 0 }, .offset = 0x60080, .shift = 1, .mask = 0xFE, .dividers = ls1028a_pltfrm_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static const uint8_t ls1028a_cga_pll_divs[] = { 2, 3, 4, 0 }; static struct qoriq_clk_pll_def ls1028a_cga_pll1 = { .clkdef = { .name = "ls1028a_cga_pll1", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 0), .flags = 0 }, .offset = 0x80, .shift = 1, .mask = 0xFE, .dividers = ls1028a_cga_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def ls1028a_cga_pll2 = { .clkdef = { .name = "ls1028a_cga_pll2", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 20), .flags = 0 }, .offset = 0xA0, .shift = 1, .mask = 0xFE, .dividers = ls1028a_cga_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def *ls1028a_cga_plls[] = { &ls1028a_cga_pll1, &ls1028a_cga_pll2 }; static const char *ls1028a_cmux0_parent_names[] = { "ls1028a_cga_pll1", "ls1028a_cga_pll1_div2", "ls1028a_cga_pll1_div4", NULL, "ls1028a_cga_pll2", "ls1028a_cga_pll2_div2", "ls1028a_cga_pll2_div4" }; static struct clk_mux_def ls1028a_cmux0 = { .clkdef = { .name = "ls1028a_cmux0", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 0), .parent_names = ls1028a_cmux0_parent_names, .parent_cnt = nitems(ls1028a_cmux0_parent_names), .flags = 0 }, .offset = 0x70000, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1028a_cmux1 = { .clkdef = { .name = "ls1028a_cmux1", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 1), .parent_names = ls1028a_cmux0_parent_names, .parent_cnt = nitems(ls1028a_cmux0_parent_names), .flags = 0 }, .offset = 0x70020, .shift = 27, .width = 4, .mux_flags = 0 }; static struct 
clk_mux_def ls1028a_cmux2 = { .clkdef = { .name = "ls1028a_cmux2", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 2), .parent_names = ls1028a_cmux0_parent_names, .parent_cnt = nitems(ls1028a_cmux0_parent_names), .flags = 0 }, .offset = 0x70040, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1028a_cmux3 = { .clkdef = { .name = "ls1028a_cmux3", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 3), .parent_names = ls1028a_cmux0_parent_names, .parent_cnt = nitems(ls1028a_cmux0_parent_names), .flags = 0 }, .offset = 0x70060, .shift = 27, .width = 4, .mux_flags = 0 }; static const char *ls1028a_hwaccel1_parent_names[] = { "ls1028a_platform_pll", "ls1028a_cga_pll1", "ls1028a_cga_pll1_div2", "ls1028a_cga_pll1_div3", "ls1028a_cga_pll1_div4", NULL, "ls1028a_cga_pll2_div2", "ls1028a_cga_pll2_div3" }; static const char *ls1028a_hwaccel2_parent_names[] = { "ls1028a_platform_pll", "ls1028a_cga_pll2", "ls1028a_cga_pll2_div2", "ls1028a_cga_pll2_div3", "ls1028a_cga_pll2_div4", NULL, "ls1028a_cga_pll1_div2", "ls1028a_cga_pll1_div3" }; static struct clk_mux_def ls1028a_hwaccel1 = { .clkdef = { .name = "ls1028a_hwaccel1", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 0), .parent_names = ls1028a_hwaccel1_parent_names, .parent_cnt = nitems(ls1028a_hwaccel1_parent_names), .flags = 0 }, .offset = 0x10, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1028a_hwaccel2 = { .clkdef = { .name = "ls1028a_hwaccel2", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 1), .parent_names = ls1028a_hwaccel2_parent_names, .parent_cnt = nitems(ls1028a_hwaccel2_parent_names), .flags = 0 }, .offset = 0x30, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1028a_hwaccel3 = { .clkdef = { .name = "ls1028a_hwaccel3", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 2), .parent_names = ls1028a_hwaccel1_parent_names, .parent_cnt = nitems(ls1028a_hwaccel1_parent_names), .flags = 0 }, .offset = 0x50, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1028a_hwaccel4 = { .clkdef = { 
.name = "ls1028a_hwaccel4", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 3), .parent_names = ls1028a_hwaccel2_parent_names, .parent_cnt = nitems(ls1028a_hwaccel2_parent_names), .flags = 0 }, .offset = 0x70, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def *ls1028a_mux_nodes[] = { &ls1028a_cmux0, &ls1028a_cmux1, &ls1028a_cmux2, &ls1028a_cmux3, &ls1028a_hwaccel1, &ls1028a_hwaccel2, &ls1028a_hwaccel3, &ls1028a_hwaccel4 }; static int ls1028a_clkgen_probe(device_t); static int ls1028a_clkgen_attach(device_t); static device_method_t ls1028a_clkgen_methods[] = { DEVMETHOD(device_probe, ls1028a_clkgen_probe), DEVMETHOD(device_attach, ls1028a_clkgen_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ls1028a_clkgen, ls1028a_clkgen_driver, ls1028a_clkgen_methods, sizeof(struct qoriq_clkgen_softc), qoriq_clkgen_driver); EARLY_DRIVER_MODULE(ls1028a_clkgen, simplebus, ls1028a_clkgen_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int ls1028a_clkgen_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if(!ofw_bus_is_compatible(dev, "fsl,ls1028a-clockgen")) return (ENXIO); device_set_desc(dev, "LS1028A clockgen"); return (BUS_PROBE_DEFAULT); } static int ls1028a_clkgen_attach(device_t dev) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); sc->pltfrm_pll_def = &ls1028a_pltfrm_pll; sc->cga_pll = ls1028a_cga_plls; sc->cga_pll_num = nitems(ls1028a_cga_plls); sc->mux = ls1028a_mux_nodes; sc->mux_num = nitems(ls1028a_mux_nodes); sc->flags = QORIQ_LITTLE_ENDIAN; return (qoriq_clkgen_attach(dev)); } diff --git a/sys/arm64/qoriq/clk/ls1028a_flexspi_clk.c b/sys/arm64/qoriq/clk/ls1028a_flexspi_clk.c index 00cf738d1b9e..e8635b35fe5f 100644 --- a/sys/arm64/qoriq/clk/ls1028a_flexspi_clk.c +++ b/sys/arm64/qoriq/clk/ls1028a_flexspi_clk.c @@ -1,307 +1,307 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "clkdev_if.h" #include "syscon_if.h" struct ls1028a_flexspi_clk_softc { device_t dev; struct clkdom *clkdom; uint64_t reg_offset; struct syscon *syscon; struct clk_div_def clk_def; struct mtx mtx; }; static struct clk_div_table ls1028a_flexspi_div_tbl[] = { { .value = 0, .divider = 1, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 3, }, { .value = 3, .divider = 4, }, { .value = 4, .divider = 5, }, { .value = 5, .divider = 6, }, { .value = 6, .divider = 7, }, { .value = 7, .divider = 8, }, { .value = 11, .divider = 12, }, { .value = 15, .divider = 16, }, { .value = 16, .divider = 20, }, { .value = 17, .divider = 24, }, { .value = 18, .divider = 28, }, { .value = 19, .divider = 32, }, { .value = 20, .divider = 80, }, {} }; static struct clk_div_table lx2160a_flexspi_div_tbl[] = { { .value = 1, .divider = 2, }, { .value = 3, .divider = 4, }, { .value = 5, .divider = 6, }, { .value = 7, .divider = 8, }, { .value = 11, .divider = 12, }, { .value = 15, .divider = 16, }, { .value = 16, .divider = 20, }, { .value = 17, .divider = 24, }, { .value = 18, .divider = 28, }, { .value = 19, .divider = 32, }, { .value = 20, .divider = 80, }, {} }; static struct ofw_compat_data compat_data[] = { { "fsl,ls1028a-flexspi-clk", (uintptr_t)ls1028a_flexspi_div_tbl }, { "fsl,lx2160a-flexspi-clk", (uintptr_t)lx2160a_flexspi_div_tbl }, { NULL, 0 } }; static int ls1028a_flexspi_clk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "NXP FlexSPI clock driver"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ls1028a_flexspi_clk_attach(device_t dev) { struct ls1028a_flexspi_clk_softc *sc; const char *oclkname = NULL; const char *pclkname[1]; uint32_t acells; uint32_t scells; pcell_t cells[4]; phandle_t 
node;
	uint64_t reg_size;
	int ret;
	clk_t clk;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	/* Parse address-cells and size-cells from the parent node as a fallback */
	if (OF_getencprop(node, "#address-cells", &acells,
	    sizeof(acells)) == -1) {
		if (OF_getencprop(OF_parent(node), "#address-cells", &acells,
		    sizeof(acells)) == -1) {
			acells = 2;
		}
	}
	if (OF_getencprop(node, "#size-cells", &scells,
	    sizeof(scells)) == -1) {
		if (OF_getencprop(OF_parent(node), "#size-cells", &scells,
		    sizeof(scells)) == -1) {
			scells = 1;
		}
	}

	/* Decode the (possibly 64-bit) "reg" address/size pair. */
	ret = OF_getencprop(node, "reg", cells,
	    (acells + scells) * sizeof(pcell_t));
	if (ret < 0) {
		device_printf(dev, "ERROR: failed to read REG property\n");
		return (ENOMEM);
	}
	sc->reg_offset = (uint64_t)cells[0];
	if (acells == 2)
		sc->reg_offset = (sc->reg_offset << 32) | (uint64_t)cells[1];
	reg_size = (uint64_t)cells[acells];
	if (scells == 2)
		reg_size = (reg_size << 32) | (uint64_t)cells[acells + 1];

	/* The divider lives in exactly one 32-bit register. */
	if (reg_size != 4) {
		device_printf(dev, "ERROR, expected only single register\n");
		return (EINVAL);
	}
	if (sc->reg_offset >> 32UL) {
		device_printf(dev, "ERROR, only 32-bit address offset is supported\n");
		return (EINVAL);
	}

	/* Get syscon handle */
	ret = SYSCON_GET_HANDLE(dev, &sc->syscon);
	if ((ret != 0) || (sc->syscon == NULL)) {
		device_printf(dev, "ERROR: failed to get syscon\n");
		return (EFAULT);
	}

	/* Initialize access mutex */
	mtx_init(&sc->mtx, "FSL clock mtx", NULL, MTX_DEF);

	/* Get clock names */
	ret = clk_get_by_ofw_index(dev, node, 0, &clk);
	if (ret) {
		device_printf(dev, "ERROR: failed to get parent clock\n");
		return (EINVAL);
	}
	pclkname[0] = clk_get_name(clk);
	ret = clk_parse_ofw_clk_name(dev, node, &oclkname);
	if (ret) {
		device_printf(dev, "ERROR: failed to get output clock name\n");
		return (EINVAL);
	}
#ifdef DEBUG
	device_printf(dev, "INFO: pclkname %s, oclkname %s\n",
	    pclkname[0], oclkname);
#endif

	/* Fixup CLK structure */
	sc->clk_def.clkdef.name = oclkname;
	sc->clk_def.clkdef.parent_names = (const char **)pclkname;
sc->clk_def.offset = (uint32_t)sc->reg_offset;
	sc->clk_def.clkdef.id = 1;
	sc->clk_def.clkdef.parent_cnt = 1;
	sc->clk_def.clkdef.flags = 0;
	/* Divider values come from the per-SoC table in compat_data. */
	sc->clk_def.div_flags = CLK_DIV_WITH_TABLE;
	sc->clk_def.i_shift = 0;
	sc->clk_def.i_width = 5;
	sc->clk_def.div_table =
	    (struct clk_div_table*)ofw_bus_search_compatible(dev,
	    compat_data)->ocd_data;

	/* Create clock */
	sc->clkdom = clkdom_create(dev);
	if (sc->clkdom == NULL)
		panic("clkdom == NULL");
	ret = clknode_div_register(sc->clkdom, &sc->clk_def);
	if (ret) {
		device_printf(dev, "ERROR: unable to register clock\n");
		return (EINVAL);
	}
	clkdom_finit(sc->clkdom);

	if (bootverbose)
		clkdom_dump(sc->clkdom);

	return (0);
}

static int
ls1028a_flexspi_clk_detach(device_t dev)
{

	/* Clock detaching is not supported */
	return (EACCES);
}

/* clkdev interface: register accesses are routed through the syscon. */
static int
ls1028a_flexspi_clk_read_4(device_t dev, bus_addr_t addr, uint32_t *val)
{
	struct ls1028a_flexspi_clk_softc *sc;

	sc = device_get_softc(dev);

	*val = SYSCON_READ_4(sc->syscon, addr);
	return (0);
}

static int
ls1028a_flexspi_clk_write_4(device_t dev, bus_addr_t addr, uint32_t val)
{
	struct ls1028a_flexspi_clk_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = SYSCON_WRITE_4(sc->syscon, addr, val);
	return (ret);
}

static int
ls1028a_flexspi_clk_modify_4(device_t dev, bus_addr_t addr, uint32_t clr,
    uint32_t set)
{
	struct ls1028a_flexspi_clk_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = SYSCON_MODIFY_4(sc->syscon, addr, clr, set);
	return (ret);
}

/* Serialize clock register access for the clk framework. */
static void
ls1028a_flexspi_clk_device_lock(device_t dev)
{
	struct ls1028a_flexspi_clk_softc *sc;

	sc = device_get_softc(dev);
	mtx_lock(&sc->mtx);
}

static void
ls1028a_flexspi_clk_device_unlock(device_t dev)
{
	struct ls1028a_flexspi_clk_softc *sc;

	sc = device_get_softc(dev);
	mtx_unlock(&sc->mtx);
}

static device_method_t ls1028a_flexspi_clk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ls1028a_flexspi_clk_probe),
	DEVMETHOD(device_attach,	ls1028a_flexspi_clk_attach),
	DEVMETHOD(device_detach,	ls1028a_flexspi_clk_detach),
DEVMETHOD(clkdev_read_4, ls1028a_flexspi_clk_read_4), DEVMETHOD(clkdev_write_4, ls1028a_flexspi_clk_write_4), DEVMETHOD(clkdev_modify_4, ls1028a_flexspi_clk_modify_4), DEVMETHOD(clkdev_device_lock, ls1028a_flexspi_clk_device_lock), DEVMETHOD(clkdev_device_unlock, ls1028a_flexspi_clk_device_unlock), DEVMETHOD_END }; static DEFINE_CLASS_0(fspi_clk, ls1028a_flexspi_clk_driver, ls1028a_flexspi_clk_methods, sizeof(struct ls1028a_flexspi_clk_softc)); EARLY_DRIVER_MODULE(ls1028a_flexspi_clk, simple_mfd, ls1028a_flexspi_clk_driver, NULL, NULL, BUS_PASS_TIMER); diff --git a/sys/arm64/qoriq/clk/ls1046a_clkgen.c b/sys/arm64/qoriq/clk/ls1046a_clkgen.c index cc6f01537f81..96bafe415089 100644 --- a/sys/arm64/qoriq/clk/ls1046a_clkgen.c +++ b/sys/arm64/qoriq/clk/ls1046a_clkgen.c @@ -1,250 +1,250 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group. * Copyright (c) 2020 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include static uint8_t ls1046a_pltfrm_pll_divs[] = { 2, 4, 0 }; static struct qoriq_clk_pll_def ls1046a_pltfrm_pll = { .clkdef = { .name = "ls1046a_platform_pll", .id = QORIQ_CLK_ID(QORIQ_TYPE_PLATFORM_PLL, 0), .flags = 0 }, .offset = 0xC00, .shift = 1, .mask = 0x7E, .dividers = ls1046a_pltfrm_pll_divs, .flags = 0 }; static const uint8_t ls1046a_cga1_pll_divs[] = { 2, 3, 4, 0 }; static struct qoriq_clk_pll_def ls1046a_cga1_pll = { .clkdef = { .name = "ls1046a_cga_pll1", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 0), .flags = 0 }, .offset = 0x800, .shift = 1, .mask = 0x1FE, .dividers = ls1046a_cga1_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def ls1046a_cga2_pll = { .clkdef = { .name = "ls1046a_cga_pll2", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 20), .flags = 0 }, .offset = 0x820, .shift = 1, .mask = 0x1FE, .dividers = ls1046a_cga1_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def *ls1046a_cga_plls[] = { &ls1046a_cga1_pll, &ls1046a_cga2_pll }; static const char *ls1046a_cmux0_parent_names[] = { "ls1046a_cga_pll1", NULL, "ls1046a_cga_pll1_div2", NULL, "ls1046a_cga_pll2", NULL, "ls1046a_cga_pll2_div2" }; static struct clk_mux_def ls1046a_cmux0 = { .clkdef = { .name = "ls1046a_cmux0", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 0), .parent_names = ls1046a_cmux0_parent_names, .parent_cnt = 
nitems(ls1046a_cmux0_parent_names), .flags = 0 }, .offset = 0, .shift = 27, .width = 4, .mux_flags = 0 }; static const char *ls1046a_hwaccel1_parent_names[] = { NULL, NULL, "ls1046a_cga_pll1_div2", "ls1046a_cga_pll1_div3", "ls1046a_cga_pll1_div4", "ls1046a_platform_pll", "ls1046a_cga_pll2_div2", "ls1046a_cga_pll2_div3" }; static const char *ls1046a_hwaccel2_parent_names[] = { NULL, "ls1046a_cga_pll2", "ls1046a_cga_pll2_div2", "ls1046a_cga_pll2_div3", NULL, NULL, "ls1046a_cga_pll1_div2" }; static struct clk_mux_def ls1046a_hwaccel1 = { .clkdef = { .name = "ls1046a_hwaccel1", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 0), .parent_names = ls1046a_hwaccel1_parent_names, .parent_cnt = nitems(ls1046a_hwaccel1_parent_names), .flags = 0 }, .offset = 0x10, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1046a_hwaccel2 = { .clkdef = { .name = "ls1046a_hwaccel2", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 1), .parent_names = ls1046a_hwaccel2_parent_names, .parent_cnt = nitems(ls1046a_hwaccel2_parent_names), .flags = 0 }, .offset = 0x30, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def *ls1046a_mux_nodes[] = { &ls1046a_cmux0, &ls1046a_hwaccel1, &ls1046a_hwaccel2 }; const char *ls1046a_fman_srcs[] = { "ls1046a_hwaccel1" }; static int ls1046a_clkgen_probe(device_t); static int ls1046a_clkgen_attach(device_t); static device_method_t ls1046a_clkgen_methods[] = { DEVMETHOD(device_probe, ls1046a_clkgen_probe), DEVMETHOD(device_attach, ls1046a_clkgen_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ls1046a_clkgen, ls1046a_clkgen_driver, ls1046a_clkgen_methods, sizeof(struct qoriq_clkgen_softc), qoriq_clkgen_driver); EARLY_DRIVER_MODULE(ls1046a_clkgen, simplebus, ls1046a_clkgen_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int ls1046a_fman_init(device_t dev) { struct qoriq_clkgen_softc *sc; struct clk_fixed_def def; int error; sc = device_get_softc(dev); def.clkdef.name = "ls1046a_fman", def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_FMAN, 0), 
def.clkdef.parent_names = ls1046a_fman_srcs; def.clkdef.parent_cnt = nitems(ls1046a_fman_srcs); def.clkdef.flags = 0; def.freq = 0; def.mult = 1; def.div = 1; def.fixed_flags = 0; error = clknode_fixed_register(sc->clkdom, &def); return (error); } static int ls1046a_clkgen_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if(!ofw_bus_is_compatible(dev, "fsl,ls1046a-clockgen")) return (ENXIO); device_set_desc(dev, "LS1046A clockgen"); return (BUS_PROBE_DEFAULT); } static int ls1046a_clkgen_attach(device_t dev) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); sc->pltfrm_pll_def = &ls1046a_pltfrm_pll; sc->cga_pll = ls1046a_cga_plls; sc->cga_pll_num = nitems(ls1046a_cga_plls); sc->mux = ls1046a_mux_nodes; sc->mux_num = nitems(ls1046a_mux_nodes); sc->init_func = ls1046a_fman_init; sc->flags = 0; return (qoriq_clkgen_attach(dev)); } diff --git a/sys/arm64/qoriq/clk/ls1088a_clkgen.c b/sys/arm64/qoriq/clk/ls1088a_clkgen.c index 916bf9a25331..df5a343ec109 100644 --- a/sys/arm64/qoriq/clk/ls1088a_clkgen.c +++ b/sys/arm64/qoriq/clk/ls1088a_clkgen.c @@ -1,270 +1,270 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * Copyright (c) 2022 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Based on QorIQ LS1088A Reference Manual, Rev. 1, 11/2020. * [LS1088ARM.pdf] */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include static uint8_t ls1088a_pltfrm_pll_divs[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0 }; static struct qoriq_clk_pll_def ls1088a_pltfrm_pll = { .clkdef = { .name = "ls1088a_platform_pll", .id = QORIQ_CLK_ID(QORIQ_TYPE_PLATFORM_PLL, 0), .flags = 0 }, .offset = 0x60080, .shift = 1, .mask = 0xFE, .dividers = ls1088a_pltfrm_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static const uint8_t ls1088a_cga_pll_divs[] = { 2, 3, 4, 0 }; static struct qoriq_clk_pll_def ls1088a_cga_pll1 = { .clkdef = { .name = "ls1088a_cga_pll1", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 0), .flags = 0 }, .offset = 0x80, .shift = 1, .mask = 0xFE, .dividers = ls1088a_cga_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def ls1088a_cga_pll2 = { .clkdef = { .name = "ls1088a_cga_pll2", .id = QORIQ_CLK_ID(QORIQ_TYPE_INTERNAL, 20), .flags = 0 }, .offset = 0xA0, .shift = 1, .mask = 0xFE, .dividers = ls1088a_cga_pll_divs, .flags = QORIQ_CLK_PLL_HAS_KILL_BIT }; static struct qoriq_clk_pll_def *ls1088a_cga_plls[] = { &ls1088a_cga_pll1, &ls1088a_cga_pll2 }; /* 4.7.2 Core Cluster a Clock Control/Status Register (CLKC1CSR - CLKC2CSR) */ static const char *ls1088a_cmux0_parent_names[] = { "ls1088a_cga_pll1", "ls1088a_cga_pll1_div2", 
"ls1088a_cga_pll1_div4", NULL, "ls1088a_cga_pll2", "ls1088a_cga_pll2_div2", "ls1088a_cga_pll2_div4" }; static struct clk_mux_def ls1088a_cmux0 = { .clkdef = { .name = "ls1088a_cmux0", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 0), .parent_names = ls1088a_cmux0_parent_names, .parent_cnt = nitems(ls1088a_cmux0_parent_names), .flags = 0 }, .offset = 0x70000, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1088a_cmux1 = { .clkdef = { .name = "ls1088a_cmux1", .id = QORIQ_CLK_ID(QORIQ_TYPE_CMUX, 1), .parent_names = ls1088a_cmux0_parent_names, .parent_cnt = nitems(ls1088a_cmux0_parent_names), .flags = 0 }, .offset = 0x70020, .shift = 27, .width = 4, .mux_flags = 0 }; /* 4.4.2 HWAaCSR (HWA1CSR - HWA3CSR) */ static const char *ls1088a_hwaccel1_parent_names[] = { "ls1088a_platform_pll", "ls1088a_cga_pll1", "ls1088a_cga_pll1_div2", "ls1088a_cga_pll1_div3", "ls1088a_cga_pll1_div4", NULL, /* HWAMUX1 External Clock Source */ "ls1088a_cga_pll2_div2", "ls1088a_cga_pll2_div3" }; static const char *ls1088a_hwaccel2_parent_names[] = { "ls1088a_platform_pll", "ls1088a_cga_pll2", "ls1088a_cga_pll2_div2", "ls1088a_cga_pll2_div3", "ls1088a_cga_pll2_div4", NULL, /* HWAMUX2 External Clock Source */ "ls1088a_cga_pll1_div2", "ls1088a_cga_pll1_div3" }; static const char *ls1088a_hwaccel3_parent_names[] = { "ls1088a_platform_pll", NULL, NULL, NULL, NULL, NULL, /* HWAMUX3 External Clock Source */ "ls1088a_cga_pll2_div2", "ls1088a_cga_pll2_div3" }; static struct clk_mux_def ls1088a_hwaccel1 = { .clkdef = { .name = "ls1088a_hwaccel1", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 0), .parent_names = ls1088a_hwaccel1_parent_names, .parent_cnt = nitems(ls1088a_hwaccel1_parent_names), .flags = 0 }, .offset = 0x10, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1088a_hwaccel2 = { .clkdef = { .name = "ls1088a_hwaccel2", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 1), .parent_names = ls1088a_hwaccel2_parent_names, .parent_cnt = nitems(ls1088a_hwaccel2_parent_names), .flags = 
0 }, .offset = 0x30, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def ls1088a_hwaccel3 = { .clkdef = { .name = "ls1088a_hwaccel3", .id = QORIQ_CLK_ID(QORIQ_TYPE_HWACCEL, 2), .parent_names = ls1088a_hwaccel3_parent_names, .parent_cnt = nitems(ls1088a_hwaccel3_parent_names), .flags = 0 }, .offset = 0x50, .shift = 27, .width = 4, .mux_flags = 0 }; static struct clk_mux_def *ls1088a_mux_nodes[] = { &ls1088a_cmux0, &ls1088a_cmux1, &ls1088a_hwaccel1, &ls1088a_hwaccel2, &ls1088a_hwaccel3 }; static int ls1088a_clkgen_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if(!ofw_bus_is_compatible(dev, "fsl,ls1088a-clockgen")) return (ENXIO); device_set_desc(dev, "LS1088A clockgen"); return (BUS_PROBE_DEFAULT); } static int ls1088a_clkgen_attach(device_t dev) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); sc->pltfrm_pll_def = &ls1088a_pltfrm_pll; sc->cga_pll = ls1088a_cga_plls; sc->cga_pll_num = nitems(ls1088a_cga_plls); sc->mux = ls1088a_mux_nodes; sc->mux_num = nitems(ls1088a_mux_nodes); sc->flags = QORIQ_LITTLE_ENDIAN; return (qoriq_clkgen_attach(dev)); } static device_method_t ls1088a_clkgen_methods[] = { DEVMETHOD(device_probe, ls1088a_clkgen_probe), DEVMETHOD(device_attach, ls1088a_clkgen_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ls1088a_clkgen, ls1088a_clkgen_driver, ls1088a_clkgen_methods, sizeof(struct qoriq_clkgen_softc), qoriq_clkgen_driver); EARLY_DRIVER_MODULE(ls1088a_clkgen, simplebus, ls1088a_clkgen_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm64/qoriq/clk/lx2160a_clkgen.c b/sys/arm64/qoriq/clk/lx2160a_clkgen.c index 3ce70470cb96..e4e99811991d 100644 --- a/sys/arm64/qoriq/clk/lx2160a_clkgen.c +++ b/sys/arm64/qoriq/clk/lx2160a_clkgen.c @@ -1,207 +1,207 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Clock driver for LX2160A SoC. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #define PLL(_id1, _id2, cname, o, d) \ { \ .clkdef.id = QORIQ_CLK_ID(_id1, _id2), \ .clkdef.name = cname, \ .clkdef.flags = 0, \ .offset = o, \ .shift = 1, \ .mask = 0xFE, \ .dividers = d, \ .flags = QORIQ_CLK_PLL_HAS_KILL_BIT, \ } static const uint8_t plt_divs[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0}; static const uint8_t cga_divs[] = {2, 4, 0}; static const uint8_t cgb_divs[] = {2, 3, 4, 0}; static struct qoriq_clk_pll_def pltfrm_pll = PLL(QORIQ_TYPE_PLATFORM_PLL, 0, "platform_pll", 0x60080, plt_divs); static struct qoriq_clk_pll_def cga_pll1 = PLL(QORIQ_TYPE_INTERNAL, 0, "cga_pll1", 0x80, cga_divs); static struct qoriq_clk_pll_def cga_pll2 = PLL(QORIQ_TYPE_INTERNAL, 0, "cga_pll2", 0xA0, cga_divs); static struct qoriq_clk_pll_def cgb_pll1 = PLL(QORIQ_TYPE_INTERNAL, 0, "cgb_pll1", 0x10080, cgb_divs); static struct qoriq_clk_pll_def cgb_pll2 = PLL(QORIQ_TYPE_INTERNAL, 0, "cgb_pll2", 0x100A0, cgb_divs); static struct qoriq_clk_pll_def *cg_plls[] = { &cga_pll1, &cga_pll2, &cgb_pll1, &cgb_pll2, }; #if 0 static struct qoriq_clk_pll_def *cg_plls[] = { &(struct qoriq_clk_pll_def) {PLL(QORIQ_TYPE_INTERNAL, 0, "cga_pll1", 0x80, cg_divs)}, &(struct qoriq_clk_pll_def) {PLL(QORIQ_TYPE_INTERNAL, 0, "cga_pll2", 0xA0, cg_divs)}, &(struct qoriq_clk_pll_def) {PLL(QORIQ_TYPE_INTERNAL, 0, "cgb_pll1", 0x10080, cg_divs)}, &(struct qoriq_clk_pll_def) {PLL(QORIQ_TYPE_INTERNAL, 0, "cgb_pll2", 0x100A0, cg_divs)}, }; #endif static const char *cmuxa_plist[] = { "cga_pll1", "cga_pll1_div2", "cga_pll1_div4", NULL, "cga_pll2", "cga_pll2_div2", "cga_pll2_div4", }; static const char *cmuxb_plist[] = { "cgb_pll1", "cgb_pll1_div2", "cgb_pll1_div4", NULL, "cgb_pll2", "cgb_pll2_div2", "cgb_pll2_div4", }; #define MUX(_id1, _id2, cname, plist, o) \ { \ .clkdef.id = QORIQ_CLK_ID(_id1, _id2), \ .clkdef.name = cname, \ .clkdef.parent_names = plist, \ 
.clkdef.parent_cnt = nitems(plist), \ .clkdef.flags = 0, \ .offset = o, \ .width = 4, \ .shift = 27, \ .mux_flags = 0, \ } static struct clk_mux_def cmux0 = MUX(QORIQ_TYPE_CMUX, 0, "cg-cmux0", cmuxa_plist, 0x70000); static struct clk_mux_def cmux1 = MUX(QORIQ_TYPE_CMUX, 1, "cg-cmux1", cmuxa_plist, 0x70020); static struct clk_mux_def cmux2 = MUX(QORIQ_TYPE_CMUX, 2, "cg-cmux2", cmuxa_plist, 0x70040); static struct clk_mux_def cmux3 = MUX(QORIQ_TYPE_CMUX, 3, "cg-cmux3", cmuxa_plist, 0x70060); static struct clk_mux_def cmux4 = MUX(QORIQ_TYPE_CMUX, 4, "cg-cmux4", cmuxb_plist, 0x70080); static struct clk_mux_def cmux5 = MUX(QORIQ_TYPE_CMUX, 5, "cg-cmux5", cmuxb_plist, 0x700A0); static struct clk_mux_def cmux6 = MUX(QORIQ_TYPE_CMUX, 6, "cg-cmux6", cmuxb_plist, 0x700C0); static struct clk_mux_def cmux7 = MUX(QORIQ_TYPE_CMUX, 7, "cg-cmux7", cmuxb_plist, 0x700E0); static struct clk_mux_def *mux_nodes[] = { &cmux0, &cmux1, &cmux2, &cmux3, &cmux4, &cmux5, &cmux6, &cmux7, }; static int lx2160a_clkgen_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if(!ofw_bus_is_compatible(dev, "fsl,lx2160a-clockgen")) return (ENXIO); device_set_desc(dev, "LX2160A clockgen"); return (BUS_PROBE_DEFAULT); } static int lx2160a_clkgen_attach(device_t dev) { struct qoriq_clkgen_softc *sc; int rv; sc = device_get_softc(dev); sc->pltfrm_pll_def = &pltfrm_pll; sc->cga_pll = cg_plls; sc->cga_pll_num = nitems(cg_plls); sc->mux = mux_nodes; sc->mux_num = nitems(mux_nodes); sc->flags = QORIQ_LITTLE_ENDIAN; rv = qoriq_clkgen_attach(dev); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x00080, bus_read_4(sc->res, 0x00080)); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x000A0, bus_read_4(sc->res, 0x000A0)); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x10080, bus_read_4(sc->res, 0x10080)); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x100A0, bus_read_4(sc->res, 0x100A0)); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x60080, 
bus_read_4(sc->res, 0x60080)); printf(" %s: offset: 0x%08X, val: 0x%08X\n", __func__, 0x600A0, bus_read_4(sc->res, 0x600A0)); return (rv); } static device_method_t lx2160a_clkgen_methods[] = { DEVMETHOD(device_probe, lx2160a_clkgen_probe), DEVMETHOD(device_attach, lx2160a_clkgen_attach), DEVMETHOD_END }; DEFINE_CLASS_1(lx2160a_clkgen, lx2160a_clkgen_driver, lx2160a_clkgen_methods, sizeof(struct qoriq_clkgen_softc), qoriq_clkgen_driver); EARLY_DRIVER_MODULE(lx2160a_clkgen, simplebus, lx2160a_clkgen_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm64/qoriq/clk/qoriq_clk_pll.c b/sys/arm64/qoriq/clk/qoriq_clk_pll.c index 0ee05afbd0ca..0d63ae0f79ed 100644 --- a/sys/arm64/qoriq/clk/qoriq_clk_pll.c +++ b/sys/arm64/qoriq/clk/qoriq_clk_pll.c @@ -1,150 +1,150 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group. * Copyright (c) 2020 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -#include -#include +#include +#include #include #include "clkdev_if.h" struct qoriq_clk_pll_softc { bus_addr_t offset; uint32_t mask; uint32_t shift; uint32_t flags; }; #define WR4(_clk, offset, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), offset, val) #define RD4(_clk, offset, val) \ CLKDEV_READ_4(clknode_get_device(_clk), offset, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define QORIQ_PLL_KILL_BIT (1 << 31) static int qoriq_clk_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int qoriq_clk_pll_recalc_freq(struct clknode *clk, uint64_t *freq) { struct qoriq_clk_pll_softc *sc; uint32_t mul; sc = clknode_get_softc(clk); RD4(clk, sc->offset, &mul); if (sc->flags & QORIQ_CLK_PLL_HAS_KILL_BIT && mul & QORIQ_PLL_KILL_BIT) return (0); mul &= sc->mask; mul >>= sc->shift; *freq = *freq * mul; return (0); } static clknode_method_t qoriq_clk_pll_clknode_methods[] = { CLKNODEMETHOD(clknode_init, qoriq_clk_pll_init), CLKNODEMETHOD(clknode_recalc_freq, qoriq_clk_pll_recalc_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qoriq_clk_pll_clknode, qoriq_clk_pll_clknode_class, qoriq_clk_pll_clknode_methods, sizeof(struct qoriq_clk_pll_softc), clknode_class); int qoriq_clk_pll_register(struct clkdom *clkdom, const struct qoriq_clk_pll_def *clkdef) { char 
namebuf[QORIQ_CLK_NAME_MAX_LEN]; struct qoriq_clk_pll_softc *sc; struct clk_fixed_def def; const char *parent_name; struct clknode *clk; int error; int i; clk = clknode_create(clkdom, &qoriq_clk_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->mask = clkdef->mask; sc->shift = clkdef->shift; sc->flags = clkdef->flags; sc->offset = clkdef->offset; clknode_register(clkdom, clk); parent_name = clkdef->clkdef.name; def.clkdef.parent_names = &parent_name; def.clkdef.parent_cnt = 1; def.clkdef.name = namebuf; def.mult = 1; def.freq = 0; i = 0; while (clkdef->dividers[i] != 0) { def.div = clkdef->dividers[i]; def.clkdef.id = clkdef->clkdef.id + i; snprintf(namebuf, QORIQ_CLK_NAME_MAX_LEN, "%s_div%d", parent_name, clkdef->dividers[i]); error = clknode_fixed_register(clkdom, &def); if (error != 0) return (error); i++; } return (0); } diff --git a/sys/arm64/qoriq/clk/qoriq_clk_pll.h b/sys/arm64/qoriq/clk/qoriq_clk_pll.h index f6b5df647d9c..f9d4c0f6355d 100644 --- a/sys/arm64/qoriq/clk/qoriq_clk_pll.h +++ b/sys/arm64/qoriq/clk/qoriq_clk_pll.h @@ -1,50 +1,50 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group. * Copyright (c) 2020 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _QORIQ_CLK_PLL_H_ #define _QORIQ_CLK_PLL_H_ -#include +#include #define QORIQ_CLK_PLL_HAS_KILL_BIT 0x01 struct qoriq_clk_pll_def { struct clknode_init_def clkdef; bus_addr_t offset; uint32_t mask; uint8_t shift; const uint8_t *dividers; uint8_t flags; }; int qoriq_clk_pll_register(struct clkdom *clkdom, const struct qoriq_clk_pll_def *clkdef); #endif /* _QORIQ_CLK_PLL_H_ */ diff --git a/sys/arm64/qoriq/clk/qoriq_clkgen.c b/sys/arm64/qoriq/clk/qoriq_clkgen.c index 8a6d162926dd..9dca9b8c3ce8 100644 --- a/sys/arm64/qoriq/clk/qoriq_clkgen.c +++ b/sys/arm64/qoriq/clk/qoriq_clkgen.c @@ -1,316 +1,316 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group. * Copyright (c) 2020 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "clkdev_if.h" MALLOC_DEFINE(M_QORIQ_CLKGEN, "qoriq_clkgen", "qoriq_clkgen"); static struct resource_spec qoriq_clkgen_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static const char *qoriq_pll_parents_coreclk[] = { QORIQ_CORECLK_NAME }; static const char *qoriq_pll_parents_sysclk[] = { QORIQ_SYSCLK_NAME }; static int qoriq_clkgen_ofw_mapper(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk) { if (ncells != 2) return (EINVAL); if (cells[0] > 5) return (EINVAL); if (cells[0] == QORIQ_TYPE_SYSCLK || cells[0] == QORIQ_TYPE_CORECLK) if (cells[1] != 0) return (EINVAL); *clk = clknode_find_by_id(clkdom, QORIQ_CLK_ID(cells[0], cells[1])); if (*clk == NULL) return (EINVAL); return (0); } static int qoriq_clkgen_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); if (sc->flags & QORIQ_LITTLE_ENDIAN) bus_write_4(sc->res, addr, htole32(val)); else bus_write_4(sc->res, addr, htobe32(val)); return (0); } static int qoriq_clkgen_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); if (sc->flags & QORIQ_LITTLE_ENDIAN) *val = le32toh(bus_read_4(sc->res, addr)); else *val = be32toh(bus_read_4(sc->res, addr)); return (0); } static int 
qoriq_clkgen_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct qoriq_clkgen_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (sc->flags & QORIQ_LITTLE_ENDIAN) reg = le32toh(bus_read_4(sc->res, addr)); else reg = be32toh(bus_read_4(sc->res, addr)); reg &= ~clr; reg |= set; if (sc->flags & QORIQ_LITTLE_ENDIAN) bus_write_4(sc->res, addr, htole32(reg)); else bus_write_4(sc->res, addr, htobe32(reg)); return (0); } static void qoriq_clkgen_device_lock(device_t dev) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void qoriq_clkgen_device_unlock(device_t dev) { struct qoriq_clkgen_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static device_method_t qoriq_clkgen_methods[] = { DEVMETHOD(clkdev_write_4, qoriq_clkgen_write_4), DEVMETHOD(clkdev_read_4, qoriq_clkgen_read_4), DEVMETHOD(clkdev_modify_4, qoriq_clkgen_modify_4), DEVMETHOD(clkdev_device_lock, qoriq_clkgen_device_lock), DEVMETHOD(clkdev_device_unlock, qoriq_clkgen_device_unlock), DEVMETHOD_END }; DEFINE_CLASS_0(qoriq_clkgen, qoriq_clkgen_driver, qoriq_clkgen_methods, sizeof(struct qoriq_clkgen_softc)); static int qoriq_clkgen_create_sysclk(device_t dev) { struct qoriq_clkgen_softc *sc; struct clk_fixed_def def; const char *clkname; phandle_t node; uint32_t freq; clk_t clock; int rv; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->has_coreclk = false; memset(&def, 0, sizeof(def)); rv = OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)); if (rv > 0) { def.clkdef.name = QORIQ_SYSCLK_NAME; def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_SYSCLK, 0); def.freq = freq; rv = clknode_fixed_register(sc->clkdom, &def); return (rv); } else { /* * As both sysclk and coreclk need to be accessible from * device tree, create internal 1:1 divider nodes. 
*/ def.clkdef.parent_cnt = 1; def.freq = 0; def.mult = 1; def.div = 1; rv = clk_get_by_ofw_name(dev, node, "coreclk", &clock); if (rv == 0) { def.clkdef.name = QORIQ_CORECLK_NAME; clkname = clk_get_name(clock); def.clkdef.parent_names = &clkname; def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_CORECLK, 0); rv = clknode_fixed_register(sc->clkdom, &def); if (rv) return (rv); sc->has_coreclk = true; } rv = clk_get_by_ofw_name(dev, node, "sysclk", &clock); if (rv != 0) { rv = clk_get_by_ofw_index(dev, node, 0, &clock); if (rv != 0) return (rv); } clkname = clk_get_name(clock); def.clkdef.name = QORIQ_SYSCLK_NAME; def.clkdef.id = QORIQ_CLK_ID(QORIQ_TYPE_SYSCLK, 0); def.clkdef.parent_names = &clkname; rv = clknode_fixed_register(sc->clkdom, &def); return (rv); } } int qoriq_clkgen_attach(device_t dev) { struct qoriq_clkgen_softc *sc; int i, error; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, qoriq_clkgen_spec, &sc->res) != 0) { device_printf(dev, "Cannot allocate resources.\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) panic("Cannot create clock domain.\n"); error = qoriq_clkgen_create_sysclk(dev); if (error != 0) { device_printf(dev, "Cannot create sysclk.\n"); return (error); } sc->pltfrm_pll_def->clkdef.parent_names = qoriq_pll_parents_sysclk; sc->pltfrm_pll_def->clkdef.parent_cnt = 1; error = qoriq_clk_pll_register(sc->clkdom, sc->pltfrm_pll_def); if (error != 0) { device_printf(dev, "Cannot create platform PLL.\n"); return (error); } for (i = 0; i < sc->cga_pll_num; i++) { if (sc->has_coreclk) sc->cga_pll[i]->clkdef.parent_names = qoriq_pll_parents_coreclk; else sc->cga_pll[i]->clkdef.parent_names = qoriq_pll_parents_sysclk; sc->cga_pll[i]->clkdef.parent_cnt = 1; error = qoriq_clk_pll_register(sc->clkdom, sc->cga_pll[i]); if (error != 0) { device_printf(dev, "Cannot create CGA PLLs\n."); return (error); } } /* * Both CMUX and HWACCEL multiplexer nodes 
can be represented * by using built in clk_mux nodes. */ for (i = 0; i < sc->mux_num; i++) { error = clknode_mux_register(sc->clkdom, sc->mux[i]); if (error != 0) { device_printf(dev, "Cannot create MUX nodes.\n"); return (error); } } if (sc->init_func != NULL) { error = sc->init_func(dev); if (error) { device_printf(dev, "Clock init function failed.\n"); return (error); } } clkdom_set_ofw_mapper(sc->clkdom, qoriq_clkgen_ofw_mapper); if (clkdom_finit(sc->clkdom) != 0) panic("Cannot finalize clock domain initialization.\n"); if (bootverbose) clkdom_dump(sc->clkdom); return (0); } diff --git a/sys/arm64/qoriq/clk/qoriq_clkgen.h b/sys/arm64/qoriq/clk/qoriq_clkgen.h index b96c34270bab..4bfcaea2a929 100644 --- a/sys/arm64/qoriq/clk/qoriq_clkgen.h +++ b/sys/arm64/qoriq/clk/qoriq_clkgen.h @@ -1,94 +1,94 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group. * Copyright (c) 2020 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _QORIQ_CLKGEN_H_ #define _QORIQ_CLKGEN_H_ -#include -#include +#include +#include #include #define QORIQ_CLK_NAME_MAX_LEN 32 #define QORIQ_LITTLE_ENDIAN 0x01 #define QORIQ_TYPE_SYSCLK 0 #define QORIQ_TYPE_CMUX 1 #define QORIQ_TYPE_HWACCEL 2 #define QORIQ_TYPE_FMAN 3 #define QORIQ_TYPE_PLATFORM_PLL 4 #define QORIQ_TYPE_CORECLK 5 #define QORIQ_TYPE_INTERNAL 6 #define PLL_DIV1 0 #define PLL_DIV2 1 #define PLL_DIV3 2 #define PLL_DIV4 3 #define PLL_DIV5 4 #define PLL_DIV6 5 #define PLL_DIV7 6 #define PLL_DIV8 7 #define PLL_DIV9 8 #define PLL_DIV10 9 #define PLL_DIV11 10 #define PLL_DIV12 11 #define PLL_DIV13 12 #define PLL_DIV14 13 #define PLL_DIV15 14 #define PLL_DIV16 15 #define QORIQ_CLK_ID(_type, _index) ((_type << 8) + _index) #define QORIQ_SYSCLK_NAME "clockgen_sysclk" #define QORIQ_CORECLK_NAME "clockgen_coreclk" typedef int (*qoriq_init_func_t)(device_t); struct qoriq_clkgen_softc { device_t dev; struct resource *res; struct clkdom *clkdom; struct mtx mtx; struct qoriq_clk_pll_def *pltfrm_pll_def; struct qoriq_clk_pll_def **cga_pll; int cga_pll_num; struct clk_mux_def **mux; int mux_num; qoriq_init_func_t init_func; uint32_t flags; bool has_coreclk; }; MALLOC_DECLARE(M_QORIQ_CLKGEN); DECLARE_CLASS(qoriq_clkgen_driver); int qoriq_clkgen_attach(device_t); #endif /* _QORIQ_CLKGEN_H_ */ diff --git a/sys/arm64/qoriq/qoriq_therm.c b/sys/arm64/qoriq/qoriq_therm.c index 6169eec3f17c..decd55fad6e4 100644 --- a/sys/arm64/qoriq/qoriq_therm.c +++ 
b/sys/arm64/qoriq/qoriq_therm.c @@ -1,516 +1,516 @@ /*- * * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Thermometer driver for QorIQ SoCs. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "qoriq_therm_if.h" #define TMU_TMR 0x00 #define TMU_TSR 0x04 #define TMUV1_TMTMIR 0x08 #define TMUV2_TMSR 0x08 #define TMUV2_TMTMIR 0x0C #define TMU_TIER 0x20 #define TMU_TTCFGR 0x80 #define TMU_TSCFGR 0x84 #define TMU_TRITSR(x) (0x100 + (16 * (x))) #define TMU_TRITSR_VALID (1U << 31) #define TMUV2_TMSAR(x) (0x304 + (16 * (x))) #define TMU_VERSION 0xBF8 /* not in TRM */ #define TMUV2_TEUMR(x) (0xF00 + (4 * (x))) #define TMU_TTRCR(x) (0xF10 + (4 * (x))) struct tsensor { int site_id; char *name; int id; }; struct qoriq_therm_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; int ntsensors; struct tsensor *tsensors; bool little_endian; clk_t clk; int ver; }; static struct sysctl_ctx_list qoriq_therm_sysctl_ctx; struct tsensor default_sensors[] = { { 0, "site0", 0 }, { 1, "site1", 1 }, { 2, "site2", 2 }, { 3, "site3", 3 }, { 4, "site4", 4 }, { 5, "site5", 5 }, { 6, "site6", 6 }, { 7, "site7", 7 }, { 8, "site8", 8 }, { 9, "site9", 9 }, { 10, "site10", 10 }, { 11, "site11", 11 }, { 12, "site12", 12 }, { 13, "site13", 13 }, { 14, "site14", 14 }, { 15, "site15", 15 }, }; static struct tsensor imx8mq_sensors[] = { { 0, "cpu", 0 }, { 1, "gpu", 1 }, { 2, "vpu", 2 }, }; static struct tsensor ls1012_sensors[] = { { 0, "cpu-thermal", 0 }, }; static struct tsensor ls1028_sensors[] = { { 0, "ddr-controller", 0 }, { 1, "core-cluster", 1 }, }; static struct tsensor ls1043_sensors[] = { { 0, "ddr-controller", 0 }, { 1, "serdes", 1 }, { 2, "fman", 2 }, { 3, "core-cluster", 3 }, }; static struct tsensor ls1046_sensors[] = { { 0, "ddr-controller", 0 }, { 1, "serdes", 1 }, { 2, "fman", 2 }, { 3, "core-cluster", 3 }, { 4, "sec", 4 }, }; static struct tsensor ls1088_sensors[] = { { 0, "core-cluster", 0 }, { 1, "soc", 1 }, }; /* Note: tmu[1..7] not [0..6]. 
*/ static struct tsensor lx2080_sensors[] = { { 1, "ddr-controller1", 0 }, { 2, "ddr-controller2", 1 }, { 3, "ddr-controller3", 2 }, { 4, "core-cluster1", 3 }, { 5, "core-cluster2", 4 }, { 6, "core-cluster3", 5 }, { 7, "core-cluster4", 6 }, }; static struct tsensor lx2160_sensors[] = { { 0, "cluster6-7", 0 }, { 1, "ddr-cluster5", 1 }, { 2, "wriop", 2 }, { 3, "dce-qbman-hsio2", 3 }, { 4, "ccn-dpaa-tbu", 4 }, { 5, "cluster4-hsio3", 5 }, { 6, "cluster2-3", 6 }, }; struct qoriq_therm_socs { const char *name; struct tsensor *tsensors; int ntsensors; } qoriq_therm_socs[] = { #define _SOC(_n, _a) { _n, _a, nitems(_a) } _SOC("fsl,imx8mq", imx8mq_sensors), _SOC("fsl,ls1012a", ls1012_sensors), _SOC("fsl,ls1028a", ls1028_sensors), _SOC("fsl,ls1043a", ls1043_sensors), _SOC("fsl,ls1046a", ls1046_sensors), _SOC("fsl,ls1088a", ls1088_sensors), _SOC("fsl,ls2080a", lx2080_sensors), _SOC("fsl,lx2160a", lx2160_sensors), { NULL, NULL, 0 } #undef _SOC }; static struct ofw_compat_data compat_data[] = { {"fsl,qoriq-tmu", 1}, {"fsl,imx8mq-tmu", 1}, {NULL, 0}, }; static inline void WR4(struct qoriq_therm_softc *sc, bus_size_t addr, uint32_t val) { val = sc->little_endian ? htole32(val): htobe32(val); bus_write_4(sc->mem_res, addr, val); } static inline uint32_t RD4(struct qoriq_therm_softc *sc, bus_size_t addr) { uint32_t val; val = bus_read_4(sc->mem_res, addr); return (sc->little_endian ? 
le32toh(val): be32toh(val)); } static int qoriq_therm_read_temp(struct qoriq_therm_softc *sc, struct tsensor *sensor, int *temp) { int timeout; uint32_t val; /* wait for valid sample */ for (timeout = 1000; timeout > 0; timeout--) { val = RD4(sc, TMU_TRITSR(sensor->site_id)); if (val & TMU_TRITSR_VALID) break; DELAY(100); } if (timeout <= 0) device_printf(sc->dev, "Sensor %s timeouted\n", sensor->name); *temp = (int)(val & 0x1FF) * 1000; if (sc->ver == 1) *temp = (int)(val & 0xFF) * 1000; else *temp = (int)(val & 0x1FF) * 1000 - 273100; return (0); } static int qoriq_therm_get_temp(device_t dev, device_t cdev, uintptr_t id, int *val) { struct qoriq_therm_softc *sc; sc = device_get_softc(dev); if (id >= sc->ntsensors) return (ERANGE); return(qoriq_therm_read_temp(sc, sc->tsensors + id, val)); } static int qoriq_therm_sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct qoriq_therm_softc *sc; int val; int rv; int id; /* Write request */ if (req->newptr != NULL) return (EINVAL); sc = arg1; id = arg2; if (id >= sc->ntsensors) return (ERANGE); rv = qoriq_therm_read_temp(sc, sc->tsensors + id, &val); if (rv != 0) return (rv); val = val / 100; val += 2731; rv = sysctl_handle_int(oidp, &val, 0, req); return (rv); } static int qoriq_therm_init_sysctl(struct qoriq_therm_softc *sc) { int i; struct sysctl_oid *oid, *tmp; /* create node for hw.temp */ oid = SYSCTL_ADD_NODE(&qoriq_therm_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "temperature", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); if (oid == NULL) return (ENXIO); /* add sensors */ for (i = sc->ntsensors - 1; i >= 0; i--) { tmp = SYSCTL_ADD_PROC(&qoriq_therm_sysctl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, sc->tsensors[i].name, CTLTYPE_INT | CTLFLAG_RD , sc, i, qoriq_therm_sysctl_temperature, "IK", "SoC Temperature"); if (tmp == NULL) return (ENXIO); } return (0); } static int qoriq_therm_fdt_calib(struct qoriq_therm_softc *sc, phandle_t node) { int nranges, ncalibs, i; int *ranges, *calibs; /* initialize temperature range 
control registes */ nranges = OF_getencprop_alloc_multi(node, "fsl,tmu-range", sizeof(*ranges), (void **)&ranges); if (nranges < 2 || nranges > 4) { device_printf(sc->dev, "Invalid 'tmu-range' property\n"); return (ERANGE); } for (i = 0; i < nranges; i++) { WR4(sc, TMU_TTRCR(i), ranges[i]); } /* initialize calibration data for above ranges */ ncalibs = OF_getencprop_alloc_multi(node, "fsl,tmu-calibration", sizeof(*calibs),(void **)&calibs); if (ncalibs <= 0 || (ncalibs % 2) != 0) { device_printf(sc->dev, "Invalid 'tmu-calibration' property\n"); return (ERANGE); } for (i = 0; i < ncalibs; i +=2) { WR4(sc, TMU_TTCFGR, calibs[i]); WR4(sc, TMU_TSCFGR, calibs[i + 1]); } return (0); } static int qoriq_therm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "QorIQ temperature sensors"); return (BUS_PROBE_DEFAULT); } static int qoriq_therm_attach(device_t dev) { struct qoriq_therm_softc *sc; struct qoriq_therm_socs *soc; phandle_t node, root; uint32_t sites; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); sc->little_endian = OF_hasprop(node, "little-endian"); sysctl_ctx_init(&qoriq_therm_sysctl_ctx); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } /* if ((bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, qoriq_therm_intr, NULL, sc, &sc->irq_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); goto fail; } */ rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0 && rv != ENOENT) { device_printf(dev, "Cannot get clock: %d %d\n", rv, ENOENT); goto fail; } if (sc->clk 
!= NULL) { rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } } sc->ver = (RD4(sc, TMU_VERSION) >> 8) & 0xFF; /* Select per SoC configuration. */ root = OF_finddevice("/"); if (root < 0) { device_printf(dev, "Cannot get root node: %d\n", root); goto fail; } soc = qoriq_therm_socs; while (soc != NULL && soc->name != NULL) { if (ofw_bus_node_is_compatible(root, soc->name)) break; soc++; } if (soc == NULL) { device_printf(dev, "Unsupported SoC, using default sites.\n"); sc->tsensors = default_sensors; sc->ntsensors = nitems(default_sensors); } else { sc->tsensors = soc->tsensors; sc->ntsensors = soc->ntsensors; } /* stop monitoring */ WR4(sc, TMU_TMR, 0); RD4(sc, TMU_TMR); /* disable all interrupts */ WR4(sc, TMU_TIER, 0); /* setup measurement interval */ if (sc->ver == 1) { WR4(sc, TMUV1_TMTMIR, 0x0F); } else { WR4(sc, TMUV2_TMTMIR, 0x0F); /* disable */ /* these registers are not of settings is not in TRM */ WR4(sc, TMUV2_TEUMR(0), 0x51009c00); for (int i = 0; i < sc->ntsensors; i++) WR4(sc, TMUV2_TMSAR(sc->tsensors[i].site_id), 0xE); } /* prepare calibration tables */ rv = qoriq_therm_fdt_calib(sc, node); if (rv != 0) { device_printf(sc->dev, "Cannot initialize calibration tables\n"); goto fail; } /* start monitoring */ sites = 0; if (sc->ver == 1) { for (int i = 0; i < sc->ntsensors; i++) sites |= 1 << (15 - sc->tsensors[i].site_id); WR4(sc, TMU_TMR, 0x8C000000 | sites); } else { for (int i = 0; i < sc->ntsensors; i++) sites |= 1 << sc->tsensors[i].site_id; WR4(sc, TMUV2_TMSR, sites); WR4(sc, TMU_TMR, 0x83000000); } rv = qoriq_therm_init_sysctl(sc); if (rv != 0) { device_printf(sc->dev, "Cannot initialize sysctls\n"); goto fail; } OF_device_register_xref(OF_xref_from_node(node), dev); return (bus_generic_attach(dev)); fail: if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sysctl_ctx_free(&qoriq_therm_sysctl_ctx); if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_res != NULL) 
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int qoriq_therm_detach(device_t dev) { struct qoriq_therm_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sysctl_ctx_free(&qoriq_therm_sysctl_ctx); if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (0); } static device_method_t qoriq_qoriq_therm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qoriq_therm_probe), DEVMETHOD(device_attach, qoriq_therm_attach), DEVMETHOD(device_detach, qoriq_therm_detach), /* SOCTHERM interface */ DEVMETHOD(qoriq_therm_get_temperature, qoriq_therm_get_temp), DEVMETHOD_END }; static DEFINE_CLASS_0(soctherm, qoriq_qoriq_therm_driver, qoriq_qoriq_therm_methods, sizeof(struct qoriq_therm_softc)); DRIVER_MODULE(qoriq_soctherm, simplebus, qoriq_qoriq_therm_driver, NULL, NULL); diff --git a/sys/arm64/rockchip/rk3328_codec.c b/sys/arm64/rockchip/rk3328_codec.c index 11c44c4f66ae..ff6f2a3da881 100644 --- a/sys/arm64/rockchip/rk3328_codec.c +++ b/sys/arm64/rockchip/rk3328_codec.c @@ -1,605 +1,605 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Oleksandr Tymoshenko * Copyright (c) 2018 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "syscon_if.h" #include "opt_snd.h" #include #include #include "audio_dai_if.h" #include "mixer_if.h" #define RKCODEC_MIXER_DEVS (1 << SOUND_MIXER_VOLUME) #define GRF_SOC_CON2 0x0408 #define SOC_CON2_I2S_ACODEC_EN (1 << 14) #define SOC_CON2_I2S_ACODEC_EN_MASK ((1 << 14) << 16) #define GRF_SOC_CON10 0x0428 #define SOC_CON10_GPIOMUT (1 << 1) #define SOC_CON10_GPIOMUT_MASK ((1 << 1) << 16) #define SOC_CON10_GPIOMUT_EN (1 << 0) #define SOC_CON10_GPIOMUT_EN_MASK ((1 << 0) << 16) #define CODEC_RESET 0x00 #define RESET_DIG_CORE_RST (1 << 1) #define RESET_SYS_RST (1 << 0) #define CODEC_DAC_INIT_CTRL1 0x0c #define DAC_INIT_CTRL1_DIRECTION_IN (0 << 5) #define DAC_INIT_CTRL1_DIRECTION_OUT (1 << 5) #define DAC_INIT_CTRL1_DAC_I2S_MODE_SLAVE (0 << 4) #define DAC_INIT_CTRL1_DAC_I2S_MODE_MASTER (1 << 4) #define DAC_INIT_CTRL1_MODE_MASK (3 << 4) #define CODEC_DAC_INIT_CTRL2 0x10 #define DAC_INIT_CTRL2_DAC_VDL_16BITS (0 << 5) #define DAC_INIT_CTRL2_DAC_VDL_20BITS (1 << 5) #define DAC_INIT_CTRL2_DAC_VDL_24BITS (2 << 5) #define DAC_INIT_CTRL2_DAC_VDL_32BITS (3 << 5) #define DAC_INIT_CTRL2_DAC_VDL_MASK (3 << 5) #define 
DAC_INIT_CTRL2_DAC_MODE_RJM (0 << 3) #define DAC_INIT_CTRL2_DAC_MODE_LJM (1 << 3) #define DAC_INIT_CTRL2_DAC_MODE_I2S (2 << 3) #define DAC_INIT_CTRL2_DAC_MODE_PCM (3 << 3) #define DAC_INIT_CTRL2_DAC_MODE_MASK (3 << 3) #define CODEC_DAC_INIT_CTRL3 0x14 #define DAC_INIT_CTRL3_WL_16BITS (0 << 2) #define DAC_INIT_CTRL3_WL_20BITS (1 << 2) #define DAC_INIT_CTRL3_WL_24BITS (2 << 2) #define DAC_INIT_CTRL3_WL_32BITS (3 << 2) #define DAC_INIT_CTRL3_WL_MASK (3 << 2) #define DAC_INIT_CTRL3_RST_MASK (1 << 1) #define DAC_INIT_CTRL3_RST_DIS (1 << 1) #define DAC_INIT_CTRL3_DAC_BCP_REVERSAL (1 << 0) #define DAC_INIT_CTRL3_DAC_BCP_NORMAL (0 << 0) #define DAC_INIT_CTRL3_DAC_BCP_MASK (1 << 0) #define CODEC_DAC_PRECHARGE_CTRL 0x88 #define DAC_PRECHARGE_CTRL_DAC_CHARGE_PRECHARGE (1 << 7) #define DAC_PRECHARGE_CTRL_DAC_CHARGE_CURRENT_I (1 << 0) #define DAC_PRECHARGE_CTRL_DAC_CHARGE_CURRENT_ALL (0x7f) #define CODEC_DAC_PWR_CTRL 0x8c #define DAC_PWR_CTRL_DAC_PWR (1 << 6) #define DAC_PWR_CTRL_DACL_PATH_REFV (1 << 5) #define DAC_PWR_CTRL_HPOUTL_ZERO_CROSSING (1 << 4) #define DAC_PWR_CTRL_DACR_PATH_REFV (1 << 1) #define DAC_PWR_CTRL_HPOUTR_ZERO_CROSSING (1 << 0) #define CODEC_DAC_CLK_CTRL 0x90 #define DAC_CLK_CTRL_DACL_REFV_ON (1 << 7) #define DAC_CLK_CTRL_DACL_CLK_ON (1 << 6) #define DAC_CLK_CTRL_DACL_ON (1 << 5) #define DAC_CLK_CTRL_DACL_INIT_ON (1 << 4) #define DAC_CLK_CTRL_DACR_REFV_ON (1 << 3) #define DAC_CLK_CTRL_DACR_CLK_ON (1 << 2) #define DAC_CLK_CTRL_DACR_ON (1 << 1) #define DAC_CLK_CTRL_DACR_INIT_ON (1 << 0) #define CODEC_HPMIX_CTRL 0x94 #define HPMIX_CTRL_HPMIXL_EN (1 << 6) #define HPMIX_CTRL_HPMIXL_INIT_EN (1 << 5) #define HPMIX_CTRL_HPMIXL_INIT2_EN (1 << 4) #define HPMIX_CTRL_HPMIXR_EN (1 << 2) #define HPMIX_CTRL_HPMIXR_INIT_EN (1 << 1) #define HPMIX_CTRL_HPMIXR_INIT2_EN (1 << 0) #define CODEC_DAC_SELECT 0x98 #define DAC_SELECT_DACL_SELECT (1 << 4) #define DAC_SELECT_DACR_SELECT (1 << 0) #define CODEC_HPOUT_CTRL 0x9c #define HPOUT_CTRL_HPOUTL_EN (1 << 7) #define 
HPOUT_CTRL_HPOUTL_INIT_EN (1 << 6) #define HPOUT_CTRL_HPOUTL_UNMUTE (1 << 5) #define HPOUT_CTRL_HPOUTR_EN (1 << 4) #define HPOUT_CTRL_HPOUTR_INIT_EN (1 << 3) #define HPOUT_CTRL_HPOUTR_UNMUTE (1 << 2) #define CODEC_HPOUTL_GAIN_CTRL 0xa0 #define CODEC_HPOUTR_GAIN_CTRL 0xa4 #define CODEC_HPOUT_POP_CTRL 0xa8 #define HPOUT_POP_CTRL_HPOUTR_POP (1 << 5) #define HPOUT_POP_CTRL_HPOUTR_POP_XCHARGE (1 << 4) #define HPOUT_POP_CTRL_HPOUTL_POP (1 << 1) #define HPOUT_POP_CTRL_HPOUTL_POP_XCHARGE (1 << 0) #define DEFAULT_RATE (48000 * 256) static struct ofw_compat_data compat_data[] = { { "rockchip,rk3328-codec", 1}, { NULL, 0 } }; struct rkcodec_softc { device_t dev; struct resource *res; struct mtx mtx; clk_t mclk; clk_t pclk; struct syscon *grf; u_int regaddr; /* address for the sysctl */ }; #define RKCODEC_LOCK(sc) mtx_lock(&(sc)->mtx) #define RKCODEC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RKCODEC_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define RKCODEC_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int rkcodec_probe(device_t dev); static int rkcodec_attach(device_t dev); static int rkcodec_detach(device_t dev); static void rkcodec_set_power(struct rkcodec_softc *sc, bool poweron) { uint32_t val; val = RKCODEC_READ(sc, CODEC_DAC_PRECHARGE_CTRL); if (poweron) val |= DAC_PRECHARGE_CTRL_DAC_CHARGE_PRECHARGE; else val &= ~(DAC_PRECHARGE_CTRL_DAC_CHARGE_PRECHARGE); RKCODEC_WRITE(sc, CODEC_DAC_PRECHARGE_CTRL, val); DELAY(10000); val = RKCODEC_READ(sc, CODEC_DAC_PRECHARGE_CTRL); if (poweron) val |= DAC_PRECHARGE_CTRL_DAC_CHARGE_CURRENT_ALL; else val &= ~(DAC_PRECHARGE_CTRL_DAC_CHARGE_CURRENT_ALL); RKCODEC_WRITE(sc, CODEC_DAC_PRECHARGE_CTRL, val); } static void rkcodec_set_mute(struct rkcodec_softc *sc, bool muted) { uint32_t val; val = SOC_CON10_GPIOMUT_MASK; if (!muted) val |= SOC_CON10_GPIOMUT; SYSCON_WRITE_4(sc->grf, GRF_SOC_CON10, val); } static void rkcodec_reset(struct rkcodec_softc *sc) { RKCODEC_WRITE(sc, CODEC_RESET, 0); DELAY(10000); RKCODEC_WRITE(sc, 
CODEC_RESET, RESET_DIG_CORE_RST | RESET_SYS_RST);
}

static int
rkcodec_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Rockchip RK3328 CODEC");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: map registers, route the I2S fabric to the internal codec via
 * the GRF, enable clocks and run the documented power-up sequence.
 * All failure paths go through "fail:", which calls rkcodec_detach() to
 * release the memory resource and destroy the mutex.
 */
static int
rkcodec_attach(device_t dev)
{
	struct rkcodec_softc *sc;
	int error, rid;
	phandle_t node;
	uint32_t val;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->res) {
		device_printf(dev, "could not allocate resource for device\n");
		error = ENXIO;
		goto fail;
	}

	node = ofw_bus_get_node(dev);
	if (syscon_get_by_ofw_property(dev, node,
	    "rockchip,grf", &sc->grf) != 0) {
		device_printf(dev, "cannot get rockchip,grf handle\n");
		/* Was a bare return (ENXIO): leaked sc->res and the mutex. */
		error = ENXIO;
		goto fail;
	}

	val = SOC_CON2_I2S_ACODEC_EN | SOC_CON2_I2S_ACODEC_EN_MASK;
	SYSCON_WRITE_4(sc->grf, GRF_SOC_CON2, val);

	val = 0 | SOC_CON10_GPIOMUT_EN_MASK;
	SYSCON_WRITE_4(sc->grf, GRF_SOC_CON10, val);

	error = clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk);
	if (error != 0) {
		device_printf(dev, "could not get pclk clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mclk", &sc->mclk);
	if (error != 0) {
		device_printf(dev, "could not get mclk clock\n");
		goto fail;
	}

	error = clk_enable(sc->pclk);
	if (error != 0) {
		device_printf(sc->dev, "could not enable pclk clock\n");
		goto fail;
	}
	error = clk_enable(sc->mclk);
	if (error != 0) {
		device_printf(sc->dev, "could not enable mclk clock\n");
		goto fail;
	}

#if 0
	error = clk_set_freq(sc->mclk, DEFAULT_RATE, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not set frequency for mclk clock\n");
		goto fail;
	}
#endif

	/* TODO: handle mute-gpios */

	rkcodec_reset(sc);
	rkcodec_set_power(sc, true);

	/* Power-up sequence; the DELAYs between steps are required. */
	val = RKCODEC_READ(sc, CODEC_DAC_PWR_CTRL);
	val |= DAC_PWR_CTRL_DAC_PWR;
	RKCODEC_WRITE(sc, CODEC_DAC_PWR_CTRL, val);
	DELAY(1000);

	val |= DAC_PWR_CTRL_DACL_PATH_REFV |
DAC_PWR_CTRL_DACR_PATH_REFV; RKCODEC_WRITE(sc, CODEC_DAC_PWR_CTRL, val); DELAY(1000); val |= DAC_PWR_CTRL_HPOUTL_ZERO_CROSSING | DAC_PWR_CTRL_HPOUTR_ZERO_CROSSING; RKCODEC_WRITE(sc, CODEC_DAC_PWR_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_HPOUT_POP_CTRL); val |= HPOUT_POP_CTRL_HPOUTR_POP | HPOUT_POP_CTRL_HPOUTL_POP; val &= ~(HPOUT_POP_CTRL_HPOUTR_POP_XCHARGE | HPOUT_POP_CTRL_HPOUTL_POP_XCHARGE); RKCODEC_WRITE(sc, CODEC_HPOUT_POP_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_HPMIX_CTRL); val |= HPMIX_CTRL_HPMIXL_EN | HPMIX_CTRL_HPMIXR_EN; RKCODEC_WRITE(sc, CODEC_HPMIX_CTRL, val); DELAY(1000); val |= HPMIX_CTRL_HPMIXL_INIT_EN | HPMIX_CTRL_HPMIXR_INIT_EN; RKCODEC_WRITE(sc, CODEC_HPMIX_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_HPOUT_CTRL); val |= HPOUT_CTRL_HPOUTL_EN | HPOUT_CTRL_HPOUTR_EN; RKCODEC_WRITE(sc, CODEC_HPOUT_CTRL, val); DELAY(1000); val |= HPOUT_CTRL_HPOUTL_INIT_EN | HPOUT_CTRL_HPOUTR_INIT_EN; RKCODEC_WRITE(sc, CODEC_HPOUT_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_DAC_CLK_CTRL); val |= DAC_CLK_CTRL_DACL_REFV_ON | DAC_CLK_CTRL_DACR_REFV_ON; RKCODEC_WRITE(sc, CODEC_DAC_CLK_CTRL, val); DELAY(1000); val |= DAC_CLK_CTRL_DACL_CLK_ON | DAC_CLK_CTRL_DACR_CLK_ON; RKCODEC_WRITE(sc, CODEC_DAC_CLK_CTRL, val); DELAY(1000); val |= DAC_CLK_CTRL_DACL_ON | DAC_CLK_CTRL_DACR_ON; RKCODEC_WRITE(sc, CODEC_DAC_CLK_CTRL, val); DELAY(1000); val |= DAC_CLK_CTRL_DACL_INIT_ON | DAC_CLK_CTRL_DACR_INIT_ON; RKCODEC_WRITE(sc, CODEC_DAC_CLK_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_DAC_SELECT); val |= DAC_SELECT_DACL_SELECT | DAC_SELECT_DACR_SELECT; RKCODEC_WRITE(sc, CODEC_DAC_SELECT, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_HPMIX_CTRL); val |= HPMIX_CTRL_HPMIXL_INIT2_EN | HPMIX_CTRL_HPMIXR_INIT2_EN; RKCODEC_WRITE(sc, CODEC_HPMIX_CTRL, val); DELAY(1000); val = RKCODEC_READ(sc, CODEC_HPOUT_CTRL); val |= HPOUT_CTRL_HPOUTL_UNMUTE | HPOUT_CTRL_HPOUTR_UNMUTE; RKCODEC_WRITE(sc, CODEC_HPOUT_CTRL, val); DELAY(1000); RKCODEC_WRITE(sc, 
CODEC_HPOUTL_GAIN_CTRL, 0x18); RKCODEC_WRITE(sc, CODEC_HPOUTR_GAIN_CTRL, 0x18); DELAY(1000); rkcodec_set_mute(sc, false); node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); return (0); fail: rkcodec_detach(dev); return (error); } static int rkcodec_detach(device_t dev) { struct rkcodec_softc *sc; sc = device_get_softc(dev); if (sc->res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); mtx_destroy(&sc->mtx); return (0); } static int rkcodec_mixer_init(struct snd_mixer *m) { mix_setdevs(m, RKCODEC_MIXER_DEVS); return (0); } static int rkcodec_mixer_uninit(struct snd_mixer *m) { return (0); } static int rkcodec_mixer_reinit(struct snd_mixer *m) { return (0); } static int rkcodec_mixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct rkcodec_softc *sc; struct mtx *mixer_lock; uint8_t do_unlock; sc = device_get_softc(mix_getdevinfo(m)); mixer_lock = mixer_get_lock(m); if (mtx_owned(mixer_lock)) { do_unlock = 0; } else { do_unlock = 1; mtx_lock(mixer_lock); } right = left; RKCODEC_LOCK(sc); switch(dev) { case SOUND_MIXER_VOLUME: printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); break; case SOUND_MIXER_MIC: printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); break; default: break; } RKCODEC_UNLOCK(sc); if (do_unlock) { mtx_unlock(mixer_lock); } return (left | (right << 8)); } static unsigned rkcodec_mixer_setrecsrc(struct snd_mixer *m, unsigned src) { return (0); } static kobj_method_t rkcodec_mixer_methods[] = { KOBJMETHOD(mixer_init, rkcodec_mixer_init), KOBJMETHOD(mixer_uninit, rkcodec_mixer_uninit), KOBJMETHOD(mixer_reinit, rkcodec_mixer_reinit), KOBJMETHOD(mixer_set, rkcodec_mixer_set), KOBJMETHOD(mixer_setrecsrc, rkcodec_mixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(rkcodec_mixer); static int rkcodec_dai_init(device_t dev, uint32_t format) { struct rkcodec_softc *sc; int fmt, pol, clk; uint32_t ctrl1, ctrl2, ctrl3; sc = device_get_softc(dev); fmt = AUDIO_DAI_FORMAT_FORMAT(format); pol = 
AUDIO_DAI_FORMAT_POLARITY(format); clk = AUDIO_DAI_FORMAT_CLOCK(format); ctrl1 = RKCODEC_READ(sc, CODEC_DAC_INIT_CTRL1); ctrl2 = RKCODEC_READ(sc, CODEC_DAC_INIT_CTRL2); ctrl3 = RKCODEC_READ(sc, CODEC_DAC_INIT_CTRL3); ctrl3 &= ~(DAC_INIT_CTRL3_DAC_BCP_MASK); switch (pol) { case AUDIO_DAI_POLARITY_IB_NF: ctrl3 |= DAC_INIT_CTRL3_DAC_BCP_REVERSAL; break; case AUDIO_DAI_POLARITY_NB_NF: ctrl3 |= DAC_INIT_CTRL3_DAC_BCP_NORMAL; break; default: return (EINVAL); } ctrl1 &= ~(DAC_INIT_CTRL1_MODE_MASK); switch (clk) { case AUDIO_DAI_CLOCK_CBM_CFM: ctrl1 |= DAC_INIT_CTRL1_DIRECTION_OUT | DAC_INIT_CTRL1_DAC_I2S_MODE_SLAVE; break; case AUDIO_DAI_CLOCK_CBS_CFS: ctrl1 |= DAC_INIT_CTRL1_DIRECTION_IN | DAC_INIT_CTRL1_DAC_I2S_MODE_SLAVE; break; default: return (EINVAL); } ctrl2 &= ~(DAC_INIT_CTRL2_DAC_VDL_MASK | DAC_INIT_CTRL2_DAC_MODE_MASK); ctrl2 |= DAC_INIT_CTRL2_DAC_VDL_16BITS; ctrl3 &= ~(DAC_INIT_CTRL3_WL_MASK); ctrl3 |= DAC_INIT_CTRL3_WL_32BITS; switch (fmt) { case AUDIO_DAI_FORMAT_I2S: ctrl2 |= DAC_INIT_CTRL2_DAC_MODE_I2S; break; case AUDIO_DAI_FORMAT_LJ: ctrl2 |= DAC_INIT_CTRL2_DAC_MODE_LJM; break; case AUDIO_DAI_FORMAT_RJ: ctrl2 |= DAC_INIT_CTRL2_DAC_MODE_RJM; break; default: return EINVAL; } ctrl3 &= ~(DAC_INIT_CTRL3_RST_MASK); ctrl3 |= DAC_INIT_CTRL3_RST_DIS; RKCODEC_WRITE(sc, CODEC_DAC_INIT_CTRL1, ctrl1); RKCODEC_WRITE(sc, CODEC_DAC_INIT_CTRL2, ctrl2); RKCODEC_WRITE(sc, CODEC_DAC_INIT_CTRL3, ctrl3); return (0); } static int rkcodec_dai_trigger(device_t dev, int go, int pcm_dir) { // struct rkcodec_softc *sc = device_get_softc(dev); if ((pcm_dir != PCMDIR_PLAY) && (pcm_dir != PCMDIR_REC)) return (EINVAL); switch (go) { case PCMTRIG_START: if (pcm_dir == PCMDIR_PLAY) { printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); } else if (pcm_dir == PCMDIR_REC) { printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); } break; case PCMTRIG_STOP: case PCMTRIG_ABORT: if (pcm_dir == PCMDIR_PLAY) { printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); } else if (pcm_dir == PCMDIR_REC) { 
printf("[%s] %s:%d\n", __func__, __FILE__, __LINE__); } break; } return (0); } static int rkcodec_dai_setup_mixer(device_t dev, device_t pcmdev) { mixer_init(pcmdev, &rkcodec_mixer_class, dev); return (0); } static device_method_t rkcodec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rkcodec_probe), DEVMETHOD(device_attach, rkcodec_attach), DEVMETHOD(device_detach, rkcodec_detach), DEVMETHOD(audio_dai_init, rkcodec_dai_init), DEVMETHOD(audio_dai_setup_mixer, rkcodec_dai_setup_mixer), DEVMETHOD(audio_dai_trigger, rkcodec_dai_trigger), DEVMETHOD_END }; static driver_t rkcodec_driver = { "rk3328codec", rkcodec_methods, sizeof(struct rkcodec_softc), }; DRIVER_MODULE(rkcodec, simplebus, rkcodec_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm64/rockchip/rk3399_emmcphy.c b/sys/arm64/rockchip/rk3399_emmcphy.c index bc28ee3bf7d8..70c96e4daf4e 100644 --- a/sys/arm64/rockchip/rk3399_emmcphy.c +++ b/sys/arm64/rockchip/rk3399_emmcphy.c @@ -1,337 +1,337 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Rockchip RK3399 eMMC PHY */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "syscon_if.h" #define GRF_EMMCPHY_BASE 0xf780 #define GRF_EMMCPHY_CON0 (GRF_EMMCPHY_BASE + 0x00) #define PHYCTRL_FRQSEL (1 << 13) | (1 << 12) #define PHYCTRL_FRQSEL_200M 0 #define PHYCTRL_FRQSEL_50M 1 #define PHYCTRL_FRQSEL_100M 2 #define PHYCTRL_FRQSEL_150M 3 #define PHYCTRL_OTAPDLYENA (1 << 11) #define PHYCTRL_OTAPDLYSEL (1 << 10) | (1 << 9) | (1 << 8) | (1 << 7) #define PHYCTRL_ITAPCHGWIN (1 << 6) #define PHYCTRL_ITAPDLYSEL (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) | \ (1 << 1) #define PHYCTRL_ITAPDLYENA (1 << 0) #define GRF_EMMCPHY_CON1 (GRF_EMMCPHY_BASE + 0x04) #define PHYCTRL_CLKBUFSEL (1 << 8) | (1 << 7) | (1 << 6) #define PHYCTRL_SELDLYTXCLK (1 << 5) #define PHYCTRL_SELDLYRXCLK (1 << 4) #define PHYCTRL_STRBSEL 0xf #define GRF_EMMCPHY_CON2 (GRF_EMMCPHY_BASE + 0x08) #define PHYCTRL_REN_STRB (1 << 9) #define PHYCTRL_REN_CMD (1 << 8) #define PHYCTRL_REN_DAT 0xff #define GRF_EMMCPHY_CON3 (GRF_EMMCPHY_BASE + 0x0c) #define PHYCTRL_PU_STRB (1 << 9) #define PHYCTRL_PU_CMD (1 << 8) #define PHYCTRL_PU_DAT 0xff #define GRF_EMMCPHY_CON4 (GRF_EMMCPHY_BASE + 0x10) #define PHYCTRL_OD_RELEASE_CMD (1 << 9) #define PHYCTRL_OD_RELEASE_STRB (1 << 8) #define PHYCTRL_OD_RELEASE_DAT 0xff #define GRF_EMMCPHY_CON5 (GRF_EMMCPHY_BASE + 0x14) #define PHYCTRL_ODEN_STRB (1 << 9) #define 
PHYCTRL_ODEN_CMD (1 << 8) #define PHYCTRL_ODEN_DAT 0xff #define GRF_EMMCPHY_CON6 (GRF_EMMCPHY_BASE + 0x18) #define PHYCTRL_DLL_TRM_ICP (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) #define PHYCTRL_EN_RTRIM (1 << 8) #define PHYCTRL_RETRIM (1 << 7) #define PHYCTRL_DR_TY (1 << 6) | (1 << 5) | (1 << 4) #define PHYCTRL_RETENB (1 << 3) #define PHYCTRL_RETEN (1 << 2) #define PHYCTRL_ENDLL (1 << 1) #define PHYCTRL_PDB (1 << 0) #define GRF_EMMCPHY_STATUS (GRF_EMMCPHY_BASE + 0x20) #define PHYCTRL_CALDONE (1 << 6) #define PHYCTRL_DLLRDY (1 << 5) #define PHYCTRL_RTRIM (1 << 4) | (1 << 3) | (1 << 2) | (1 << 1) #define PHYCTRL_EXR_NINST (1 << 0) static struct ofw_compat_data compat_data[] = { { "rockchip,rk3399-emmc-phy", 1 }, { NULL, 0 } }; struct rk_emmcphy_softc { struct syscon *syscon; struct rk_emmcphy_conf *phy_conf; clk_t clk; }; #define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask)) #define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask)) /* Phy class and methods. */ static int rk_emmcphy_enable(struct phynode *phynode, bool enable); static phynode_method_t rk_emmcphy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk_emmcphy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(rk_emmcphy_phynode, rk_emmcphy_phynode_class, rk_emmcphy_phynode_methods, 0, phynode_class); static int rk_emmcphy_enable(struct phynode *phynode, bool enable) { struct rk_emmcphy_softc *sc; device_t dev; intptr_t phy; uint64_t rate, frqsel; uint32_t mask, val; int error; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (bootverbose) device_printf(dev, "Phy id: %ld\n", phy); if (phy != 0) { device_printf(dev, "Unknown phy: %ld\n", phy); return (ERANGE); } if (enable) { /* Drive strength */ mask = PHYCTRL_DR_TY; val = SHIFTIN(0, PHYCTRL_DR_TY); SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val); /* Enable output tap delay */ mask = PHYCTRL_OTAPDLYENA | PHYCTRL_OTAPDLYSEL; val = PHYCTRL_OTAPDLYENA | SHIFTIN(4, PHYCTRL_OTAPDLYSEL); 
	SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON0,
	    (mask << 16) | val);
	}

	/* Power down PHY and disable DLL before making changes */
	mask = PHYCTRL_ENDLL | PHYCTRL_PDB;
	val = 0;
	SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);

	if (enable == false)
		return (0);

	sc->phy_conf =
	    (struct rk_emmcphy_conf *)ofw_bus_search_compatible(dev,
	    compat_data)->ocd_data;

	/* Get clock */
	error = clk_get_by_ofw_name(dev, 0, "emmcclk", &sc->clk);
	if (error != 0) {
		/* The clock is optional: fall back to rate 0 below. */
		device_printf(dev, "cannot get emmcclk clock, continue\n");
		sc->clk = NULL;
	} else
		device_printf(dev, "got emmcclk clock\n");

	if (sc->clk) {
		error = clk_get_freq(sc->clk, &rate);
		if (error != 0) {
			device_printf(dev, "cannot get clock frequency\n");
			return (ENXIO);
		}
	} else
		rate = 0;

	/* Pick the DLL frequency band closest to the card clock rate. */
	if (rate != 0) {
		if (rate < 75000000)
			frqsel = PHYCTRL_FRQSEL_50M;
		else if (rate < 125000000)
			frqsel = PHYCTRL_FRQSEL_100M;
		else if (rate < 175000000)
			frqsel = PHYCTRL_FRQSEL_150M;
		else
			frqsel = PHYCTRL_FRQSEL_200M;
	} else
		frqsel = PHYCTRL_FRQSEL_200M;

	DELAY(3);

	/* Power up PHY */
	mask = PHYCTRL_PDB;
	val = PHYCTRL_PDB;
	SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);

	/* Wait for calibration */
	DELAY(10);
	val = SYSCON_READ_4(sc->syscon, GRF_EMMCPHY_STATUS);
	if ((val & PHYCTRL_CALDONE) == 0) {
		device_printf(dev, "PHY calibration did not complete\n");
		return (ENXIO);
	}

	/* Set DLL frequency */
	mask = PHYCTRL_FRQSEL;
	val = SHIFTIN(frqsel, PHYCTRL_FRQSEL);
	SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON0, (mask << 16) | val);

	/* Enable DLL */
	mask = PHYCTRL_ENDLL;
	val = PHYCTRL_ENDLL;
	SYSCON_WRITE_4(sc->syscon, GRF_EMMCPHY_CON6, (mask << 16) | val);

	if (rate != 0) {
		/*
		 * Rockchip RK3399 TRM V1.3 Part2.pdf says in page 698:
		 * After the DLL control loop reaches steady state a DLL
		 * ready signal is generated by the DLL circuits
		 * 'phyctrl_dllrdy'.
		 * The time from 'phyctrl_endll' to DLL ready signal
		 * 'phyctrl_dllrdy' varies with the clock frequency.
		 * At 200MHz clock frequency the DLL ready delay is 2.56us,
		 * at 100MHz clock frequency the DLL ready delay is 5.112us and
		 * at 50 MHz clock frequency the DLL ready delay is 10.231us.
		 * We could use safe values for wait, 12us, 8us, 6us and 4us
		 * respectively.
		 * However due to some unknown reason it is not working and
		 * DLL seems to take extra long time to lock.
		 * So we will use more safe value 50ms here.
		 */

		/* Wait for DLL ready */
		DELAY(50000);
		val = SYSCON_READ_4(sc->syscon, GRF_EMMCPHY_STATUS);
		if ((val & PHYCTRL_DLLRDY) == 0) {
			device_printf(dev, "DLL loop failed to lock\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
rk_emmcphy_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Rockchip RK3399 eMMC PHY");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: find the GRF syscon through the "clocks" consumer's
 * "arasan,soc-ctl-syscon" property, then create and register the
 * single PHY node (id 0).
 */
static int
rk_emmcphy_attach(device_t dev)
{
	struct phynode_init_def phy_init;
	struct phynode *phynode;
	struct rk_emmcphy_softc *sc;
	phandle_t node;
	phandle_t xnode;
	pcell_t handle;
	intptr_t phy;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	if (OF_getencprop(node, "clocks", (void *)&handle,
	    sizeof(handle)) <= 0) {
		device_printf(dev, "cannot get clocks handle\n");
		return (ENXIO);
	}
	xnode = OF_node_from_xref(handle);
	if (OF_hasprop(xnode, "arasan,soc-ctl-syscon") &&
	    syscon_get_by_ofw_property(dev, xnode,
	    "arasan,soc-ctl-syscon", &sc->syscon) != 0) {
		device_printf(dev, "cannot get grf driver handle\n");
		return (ENXIO);
	}

	if (sc->syscon == NULL) {
		device_printf(dev, "failed to get syscon\n");
		return (ENXIO);
	}

	/* Create and register phy */
	bzero(&phy_init, sizeof(phy_init));
	phy_init.id = 0;
	phy_init.ofw_node = ofw_bus_get_node(dev);
	phynode = phynode_create(dev, &rk_emmcphy_phynode_class,
	    &phy_init);
	if (phynode == NULL) {
		device_printf(dev, "failed to create eMMC PHY\n");
		return (ENXIO);
	}
	if (phynode_register(phynode) == NULL) {
		device_printf(dev, "failed to register eMMC PHY\n");
		return (ENXIO);
	}
if (bootverbose) { phy = phynode_get_id(phynode); device_printf(dev, "Attached phy id: %ld\n", phy); } return (0); } static device_method_t rk_emmcphy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_emmcphy_probe), DEVMETHOD(device_attach, rk_emmcphy_attach), DEVMETHOD_END }; static driver_t rk_emmcphy_driver = { "rk_emmcphy", rk_emmcphy_methods, sizeof(struct rk_emmcphy_softc) }; EARLY_DRIVER_MODULE(rk_emmcphy, simplebus, rk_emmcphy_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(rk_emmcphy, 1); diff --git a/sys/arm64/rockchip/rk3568_combphy.c b/sys/arm64/rockchip/rk3568_combphy.c index d89e3b5f9973..f9ba38e663ad 100644 --- a/sys/arm64/rockchip/rk3568_combphy.c +++ b/sys/arm64/rockchip/rk3568_combphy.c @@ -1,469 +1,469 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, 2022 Soren Schmidt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "syscon_if.h" #include "phydev_if.h" #include "phynode_if.h" static struct ofw_compat_data compat_data[] = { {"rockchip,rk3568-naneng-combphy", 1}, {NULL, 0} }; struct rk3568_combphy_softc { device_t dev; phandle_t node; struct resource *mem; struct phynode *phynode; struct syscon *pipe_grf; struct syscon *pipe_phy_grf; clk_t ref_clk; clk_t apb_clk; clk_t pipe_clk; hwreset_t phy_reset; int mode; }; #define PHYREG6 0x14 #define PHYREG6_PLL_DIV_MASK 0xc0 #define PHYREG6_PLL_DIV_2 (1 << 6) #define PHYREG7 0x18 #define PHYREG7_TX_RTERM_50OHM (8 << 4) #define PHYREG7_RX_RTERM_44OHM (15 << 0) #define PHYREG8 0x1c #define PHYREG8_SSC_EN 0x10 #define PHYREG11 0x28 #define PHYREG11_SU_TRIM_0_7 0xf0 #define PHYREG12 0x2c #define PHYREG12_PLL_LPF_ADJ_VALUE 4 #define PHYREG15 0x38 #define PHYREG15_CTLE_EN 0x01 #define PHYREG15_SSC_CNT_MASK 0xc0 #define PHYREG15_SSC_CNT_VALUE (1 << 6) #define PHYREG16 0x3c #define PHYREG16_SSC_CNT_VALUE 0x5f #define PHYREG18 0x44 #define PHYREG18_PLL_LOOP 0x32 #define PHYREG32 0x7c #define PHYREG32_SSC_MASK 0xf0 #define PHYREG32_SSC_UPWARD (0 << 4) #define PHYREG32_SSC_DOWNWARD (1 << 4) #define PHYREG32_SSC_OFFSET_500PPM (1 << 6) #define PHYREG33 0x80 #define PHYREG33_PLL_KVCO_MASK 0x1c #define PHYREG33_PLL_KVCO_VALUE (2 << 2) #define PIPE_MASK_ALL (0xffff << 
16) #define PIPE_PHY_GRF_PIPE_CON0 0x00 #define PIPE_DATABUSWIDTH_MASK 0x3 #define PIPE_DATABUSWIDTH_32BIT 0 #define PIPE_DATABUSWIDTH_16BIT 1 #define PIPE_PHYMODE_MASK (3 << 2) #define PIPE_PHYMODE_PCIE (0 << 2) #define PIPE_PHYMODE_USB3 (1 << 2) #define PIPE_PHYMODE_SATA (2 << 2) #define PIPE_RATE_MASK (3 << 4) #define PIPE_RATE_PCIE_2_5GBPS (0 << 4) #define PIPE_RATE_PCIE_5GBPS (1 << 4) #define PIPE_RATE_USB3_5GBPS (0 << 4) #define PIPE_RATE_SATA_1GBPS5 (0 << 4) #define PIPE_RATE_SATA_3GBPS (1 << 4) #define PIPE_RATE_SATA_6GBPS (2 << 4) #define PIPE_MAC_PCLKREQ_N (1 << 8) #define PIPE_L1SUB_ENTREQ (1 << 9) #define PIPE_RXTERM (1 << 12) #define PIPE_PHY_GRF_PIPE_CON1 0x04 #define PHY_CLK_SEL_MASK (3 << 13) #define PHY_CLK_SEL_24M (0 << 13) #define PHY_CLK_SEL_25M (1 << 13) #define PHY_CLK_SEL_100M (2 << 13) #define PIPE_PHY_GRF_PIPE_CON2 0x08 #define SEL_PIPE_TXCOMPLIANCE_I (1 << 15) #define SEL_PIPE_TXELECIDLE (1 << 12) #define SEL_PIPE_RXTERM (1 << 8) #define SEL_PIPE_BYPASS_CODEC (1 << 7) #define SEL_PIPE_PIPE_EBUF (1 << 6) #define SEL_PIPE_PIPE_PHYMODE (1 << 1) #define SEL_PIPE_DATABUSWIDTH (1 << 0) #define PIPE_PHY_GRF_PIPE_CON3 0x0c #define PIPE_SEL_MASK (3 << 13) #define PIPE_SEL_PCIE (0 << 13) #define PIPE_SEL_USB3 (1 << 13) #define PIPE_SEL_SATA (2 << 13) #define PIPE_CLK_REF_SRC_I_MASK (3 << 8) #define PIPE_CLK_REF_SRC_I_PLL_CKREF_INNER (2 << 8) #define PIPE_RXELECIDLE (1 << 10) #define PIPE_FROM_PCIE_IO (1 << 11) #define PIPE_GRF_PIPE_CON0 0x00 #define SATA2_PHY_SPDMODE_1GBPS5 (0 << 12) #define SATA2_PHY_SPDMODE_3GBPS (1 << 12) #define SATA2_PHY_SPDMODE_6GBPS (2 << 12) #define SATA1_PHY_SPDMODE_1GBPS5 (0 << 8) #define SATA1_PHY_SPDMODE_3GBPS (1 << 8) #define SATA1_PHY_SPDMODE_6GBPS (2 << 8) #define SATA0_PHY_SPDMODE_1GBPS5 (0 << 4) #define SATA0_PHY_SPDMODE_3GBPS (1 << 4) #define SATA0_PHY_SPDMODE_6GBPS (2 << 4) #define PIPE_GRF_SATA_CON0 0x10 #define PIPE_GRF_SATA_CON1 0x14 #define PIPE_GRF_SATA_CON2 0x18 #define PIPE_GRF_XPCS_CON0 0x40 /* PHY class 
and methods */ static int rk3568_combphy_enable(struct phynode *phynode, bool enable) { device_t dev = phynode_get_device(phynode); struct rk3568_combphy_softc *sc = device_get_softc(dev); uint64_t rate; if (enable == false) return (0); switch (sc->mode) { case PHY_TYPE_SATA: device_printf(dev, "configuring for SATA"); /* tx_rterm 50 ohm & rx_rterm 44 ohm */ bus_write_4(sc->mem, PHYREG7, PHYREG7_TX_RTERM_50OHM | PHYREG7_RX_RTERM_44OHM); /* Adaptive CTLE */ bus_write_4(sc->mem, PHYREG15, bus_read_4(sc->mem, PHYREG15) | PHYREG15_CTLE_EN); /* config grf_pipe for PCIe */ SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON3, PIPE_MASK_ALL | PIPE_SEL_SATA | PIPE_RXELECIDLE | 0x7); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON2, PIPE_MASK_ALL | SEL_PIPE_TXCOMPLIANCE_I | SEL_PIPE_DATABUSWIDTH | 0xc3); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON0, PIPE_MASK_ALL | PIPE_RXTERM | PIPE_DATABUSWIDTH_16BIT | PIPE_RATE_SATA_3GBPS | PIPE_PHYMODE_SATA); SYSCON_WRITE_4(sc->pipe_grf, PIPE_GRF_PIPE_CON0, PIPE_MASK_ALL | SATA0_PHY_SPDMODE_6GBPS | SATA1_PHY_SPDMODE_6GBPS | SATA2_PHY_SPDMODE_6GBPS); break; case PHY_TYPE_PCIE: device_printf(dev, "configuring for PCIe"); /* Set SSC downward spread spectrum */ bus_write_4(sc->mem, PHYREG32, (bus_read_4(sc->mem, PHYREG32) & PHYREG32_SSC_MASK) | PHYREG32_SSC_DOWNWARD); /* config grf_pipe for PCIe */ SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON3, PIPE_MASK_ALL | PIPE_SEL_PCIE | PIPE_CLK_REF_SRC_I_PLL_CKREF_INNER); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON2, PIPE_MASK_ALL | SEL_PIPE_RXTERM | SEL_PIPE_DATABUSWIDTH); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON0, PIPE_MASK_ALL | PIPE_RXTERM | PIPE_DATABUSWIDTH_32BIT | PIPE_RATE_PCIE_2_5GBPS | PIPE_PHYMODE_PCIE); break; case PHY_TYPE_USB3: device_printf(dev, "configuring for USB3"); /* Set SSC downward spread spectrum */ bus_write_4(sc->mem, PHYREG32, (bus_read_4(sc->mem, PHYREG32) & PHYREG32_SSC_MASK) | PHYREG32_SSC_DOWNWARD); /* Adaptive 
CTLE */ bus_write_4(sc->mem, PHYREG15, bus_read_4(sc->mem, PHYREG15) | PHYREG15_CTLE_EN); /* Set PLL KVCO fine tuning signals */ bus_write_4(sc->mem, PHYREG33, (bus_read_4(sc->mem, PHYREG33) & PHYREG33_PLL_KVCO_MASK) | PHYREG33_PLL_KVCO_VALUE); /* Enable controlling random jitter. */ bus_write_4(sc->mem, PHYREG12, PHYREG12_PLL_LPF_ADJ_VALUE); /* Set PLL input clock divider 1/2 */ bus_write_4(sc->mem, PHYREG6, (bus_read_4(sc->mem, PHYREG6) & PHYREG6_PLL_DIV_MASK) | PHYREG6_PLL_DIV_2); /* Set PLL loop divider */ bus_write_4(sc->mem, PHYREG18, PHYREG18_PLL_LOOP); /* Set PLL LPF R1 to su_trim[0:7] */ bus_write_4(sc->mem, PHYREG11, PHYREG11_SU_TRIM_0_7); /* config grf_pipe for USB3 */ SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON3, PIPE_MASK_ALL | PIPE_SEL_USB3); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON2, PIPE_MASK_ALL); SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON0, PIPE_MASK_ALL | PIPE_DATABUSWIDTH_16BIT | PIPE_PHYMODE_USB3 | PIPE_RATE_USB3_5GBPS); break; default: printf("Unsupported mode=%d\n", sc->mode); return (-1); } clk_get_freq(sc->ref_clk, &rate); printf(" ref_clk=%lu\n", rate); switch (rate) { case 24000000: SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON1, (PHY_CLK_SEL_MASK << 16) | PHY_CLK_SEL_24M); if (sc->mode == PHY_TYPE_USB3 || sc->mode == PHY_TYPE_SATA) { /* Adaptive CTLE */ bus_write_4(sc->mem, PHYREG15, (bus_read_4(sc->mem, PHYREG15) & PHYREG15_SSC_CNT_MASK) | PHYREG15_SSC_CNT_VALUE); /* SSC control period */ bus_write_4(sc->mem, PHYREG16, PHYREG16_SSC_CNT_VALUE); } break; case 25000000: SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON1, (PHY_CLK_SEL_MASK << 16) | PHY_CLK_SEL_25M); break; case 100000000: SYSCON_WRITE_4(sc->pipe_phy_grf, PIPE_PHY_GRF_PIPE_CON1, (PHY_CLK_SEL_MASK << 16) | PHY_CLK_SEL_100M); if (sc->mode == PHY_TYPE_PCIE) { /* Set PLL KVCO fine tuning signals */ bus_write_4(sc->mem, PHYREG33, (bus_read_4(sc->mem, PHYREG33) & PHYREG33_PLL_KVCO_MASK) | PHYREG33_PLL_KVCO_VALUE); /* Enable 
controlling random jitter. */ bus_write_4(sc->mem, PHYREG12, PHYREG12_PLL_LPF_ADJ_VALUE); /* Set PLL input clock divider 1/2 */ bus_write_4(sc->mem, PHYREG6, (bus_read_4(sc->mem, PHYREG6) & PHYREG6_PLL_DIV_MASK) | PHYREG6_PLL_DIV_2); /* Set PLL loop divider */ bus_write_4(sc->mem, PHYREG18, PHYREG18_PLL_LOOP); /* Set PLL LPF R1 to su_trim[0:7] */ bus_write_4(sc->mem, PHYREG11, PHYREG11_SU_TRIM_0_7); } if (sc->mode == PHY_TYPE_SATA) { /* Set SSC downward spread spectrum */ bus_write_4(sc->mem, PHYREG32, (bus_read_4(sc->mem, PHYREG32) & ~0x000000f0) | PHYREG32_SSC_DOWNWARD | PHYREG32_SSC_OFFSET_500PPM); } break; default: device_printf(dev, "unknown ref rate=%lu\n", rate); break; } if (OF_hasprop(sc->node, "rockchip,ext-refclk")) { device_printf(dev, "UNSUPPORTED rockchip,ext-refclk\n"); } if (OF_hasprop(sc->node, "rockchip,enable-ssc")) { device_printf(dev, "setting rockchip,enable-ssc\n"); bus_write_4(sc->mem, PHYREG8, bus_read_4(sc->mem, PHYREG8) | PHYREG8_SSC_EN); } if (hwreset_deassert(sc->phy_reset)) device_printf(dev, "phy_reset failed to clear\n"); return (0); } static phynode_method_t rk3568_combphy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk3568_combphy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(rk3568_combphy_phynode, rk3568_combphy_phynode_class, rk3568_combphy_phynode_methods, 0, phynode_class); /* Device class and methods */ static int rk3568_combphy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip combo PHY"); return (BUS_PROBE_DEFAULT); } static int rk3568_combphy_attach(device_t dev) { struct rk3568_combphy_softc *sc = device_get_softc(dev); struct phynode_init_def phy_init; struct phynode *phynode; int rid = 0; sc->dev = dev; sc->node = ofw_bus_get_node(dev); /* Get memory resource */ if (!(sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) { device_printf(dev, "Cannot allocate memory 
resources\n"); return (ENXIO); } /* Get syncons handles */ if (OF_hasprop(sc->node, "rockchip,pipe-grf") && syscon_get_by_ofw_property(dev, sc->node, "rockchip,pipe-grf", &sc->pipe_grf)) return (ENXIO); if (OF_hasprop(sc->node, "rockchip,pipe-phy-grf") && syscon_get_by_ofw_property(dev, sc->node, "rockchip,pipe-phy-grf", &sc->pipe_phy_grf)) return (ENXIO); /* Get & enable clocks */ if (clk_get_by_ofw_name(dev, 0, "ref", &sc->ref_clk)) { device_printf(dev, "getting ref failed\n"); return (ENXIO); } if (clk_enable(sc->ref_clk)) device_printf(dev, "enable ref failed\n"); if (clk_get_by_ofw_name(dev, 0, "apb", &sc->apb_clk)) { device_printf(dev, "getting apb failed\n"); return (ENXIO); } if (clk_enable(sc->apb_clk)) device_printf(dev, "enable apb failed\n"); if (clk_get_by_ofw_name(dev, 0, "pipe", &sc->pipe_clk)) { device_printf(dev, "getting pipe failed\n"); return (ENXIO); } if (clk_enable(sc->pipe_clk)) device_printf(dev, "enable pipe failed\n"); /* get & assert reset */ if (hwreset_get_by_ofw_idx(dev, sc->node, 0, &sc->phy_reset)) { device_printf(dev, "Cannot get reset\n"); return (ENXIO); } hwreset_assert(sc->phy_reset); bzero(&phy_init, sizeof(phy_init)); phy_init.id = 0; phy_init.ofw_node = sc->node; if (!(phynode = phynode_create(dev, &rk3568_combphy_phynode_class, &phy_init))) { device_printf(dev, "failed to create combphy PHY\n"); return (ENXIO); } if (!phynode_register(phynode)) { device_printf(dev, "failed to register combphy PHY\n"); return (ENXIO); } sc->phynode = phynode; sc->mode = 0; return (0); } static int rk3568_combphy_map(device_t dev, phandle_t xref, int ncells, pcell_t *cells, intptr_t *id) { struct rk3568_combphy_softc *sc = device_get_softc(dev); if (phydev_default_ofw_map(dev, xref, ncells, cells, id)) return (ERANGE); /* Store the phy mode that is handed to us in id */ sc->mode = *id; /* Set our id to 0 so the std phy_get_*() works as usual */ *id = 0; return (0); } static device_method_t rk3568_combphy_methods[] = { DEVMETHOD(device_probe, 
rk3568_combphy_probe), DEVMETHOD(device_attach, rk3568_combphy_attach), DEVMETHOD(phydev_map, rk3568_combphy_map), DEVMETHOD_END }; DEFINE_CLASS_1(rk3568_combphy, rk3568_combphy_driver, rk3568_combphy_methods, sizeof(struct simple_mfd_softc), simple_mfd_driver); EARLY_DRIVER_MODULE(rk3568_combphy, simplebus, rk3568_combphy_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_LATE); diff --git a/sys/arm64/rockchip/rk3568_pcie.c b/sys/arm64/rockchip/rk3568_pcie.c index 525b1356ae2e..b8ad5b8a33f5 100644 --- a/sys/arm64/rockchip/rk3568_pcie.c +++ b/sys/arm64/rockchip/rk3568_pcie.c @@ -1,397 +1,397 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, 2022 Soren Schmidt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* APB Registers */ #define PCIE_CLIENT_GENERAL_CON 0x0000 #define DEVICE_TYPE_MASK 0x00f0 #define DEVICE_TYPE_RC (1<<6) #define LINK_REQ_RST_GRT (1<<3) #define LTSSM_ENABLE (1<<2) #define PCIE_CLIENT_INTR_MASK_MSG_RX 0x0018 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x001c #define PCIE_CLIENT_INTR_MASK_ERR 0x0020 #define PCIE_CLIENT_INTR_MASK_MISC 0x0024 #define PCIE_CLIENT_INTR_MASK_PMC 0x0028 #define PCIE_CLIENT_GENERAL_DEBUG_INFO 0x0104 #define PCIE_CLIENT_HOT_RESET_CTRL 0x0180 #define APP_LSSTM_ENABLE_ENHANCE (1<<4) #define PCIE_CLIENT_LTSSM_STATUS 0x0300 #define RDLH_LINK_UP (1<<17) #define SMLH_LINK_UP (1<<16) #define SMLH_LTSSM_STATE_MASK 0x003f #define SMLH_LTSSM_STATE_LINK_UP ((1<<4) | (1<<0)) struct rk3568_pcie_softc { struct pci_dw_softc dw_sc; /* Must be first */ device_t dev; int apb_rid; struct resource *apb_res; int dbi_rid; struct resource *dbi_res; int irq_rid; struct resource *irq_res; void *irq_handle; phandle_t node; struct gpiobus_pin *reset_gpio; clk_t aclk_mst, aclk_slv, aclk_dbi, pclk, aux; regulator_t regulator; hwreset_t hwreset; phy_t phy; }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3568-pcie", 1}, {NULL, 0} }; static void rk3568_intr(void *data) { struct rk3568_pcie_softc *sc = data; device_printf(sc->dev, "INTERRUPT!!\n"); } static int rk3568_pcie_get_link(device_t dev, bool *status) { struct rk3568_pcie_softc *sc = device_get_softc(dev); uint32_t val; val = bus_read_4(sc->apb_res, PCIE_CLIENT_LTSSM_STATUS); if (((val & (RDLH_LINK_UP | SMLH_LINK_UP)) == (RDLH_LINK_UP | SMLH_LINK_UP)) && ((val & SMLH_LTSSM_STATE_MASK) == SMLH_LTSSM_STATE_LINK_UP)) *status = true; else *status = false; return (0); } static int 
rk3568_pcie_init_soc(device_t dev)
{
	struct rk3568_pcie_softc *sc = device_get_softc(dev);
	int err, count;
	bool status;

	/*
	 * Bring the PCIe controller out of reset, power it, configure
	 * Root Complex mode, start link training and wait for the link.
	 */

	/* Assert reset */
	if (hwreset_assert(sc->hwreset))
		device_printf(dev, "Could not assert reset\n");

	/* Powerup PCIe */
	if (regulator_enable(sc->regulator))
		device_printf(dev, "Cannot enable regulator\n");

	/* Enable PHY */
	if (phy_enable(sc->phy))
		device_printf(dev, "Cannot enable phy\n");

	/* Deassert reset */
	if (hwreset_deassert(sc->hwreset))
		device_printf(dev, "Could not deassert reset\n");

	/* Enable clocks */
	if ((err = clk_enable(sc->aclk_mst))) {
		device_printf(dev, "Could not enable aclk_mst clk\n");
		return (ENXIO);
	}
	if ((err = clk_enable(sc->aclk_slv))) {
		device_printf(dev, "Could not enable aclk_slv clk\n");
		return (ENXIO);
	}
	if ((err = clk_enable(sc->aclk_dbi))) {
		device_printf(dev, "Could not enable aclk_dbi clk\n");
		return (ENXIO);
	}
	if ((err = clk_enable(sc->pclk))) {
		device_printf(dev, "Could not enable pclk clk\n");
		return (ENXIO);
	}
	if ((err = clk_enable(sc->aux))) {
		device_printf(dev, "Could not enable aux clk\n");
		return (ENXIO);
	}

	/* Set Root Complex (RC) mode */
	bus_write_4(sc->apb_res, PCIE_CLIENT_HOT_RESET_CTRL,
	    (APP_LSSTM_ENABLE_ENHANCE << 16) | APP_LSSTM_ENABLE_ENHANCE);
	bus_write_4(sc->apb_res, PCIE_CLIENT_GENERAL_CON,
	    (DEVICE_TYPE_MASK << 16) | DEVICE_TYPE_RC);

	/* Assert reset PCIe */
	if ((err = gpio_pin_set_active(sc->reset_gpio, false)))
		device_printf(dev, "reset_gpio set failed\n");

	/* Start Link Training and Status State Machine (LTSSM) */
	bus_write_4(sc->apb_res, PCIE_CLIENT_GENERAL_CON,
	    (LINK_REQ_RST_GRT | LTSSM_ENABLE) << 16 |
	    (LINK_REQ_RST_GRT | LTSSM_ENABLE));
	DELAY(100000);

	/* Release reset */
	if ((err = gpio_pin_set_active(sc->reset_gpio, true)))
		device_printf(dev, "reset_gpio release failed\n");

	/*
	 * Wait for link up/stable.
	 *
	 * BUGFIX: the original tested (count == 0) inside the loop body,
	 * where it can never be true because the loop condition already
	 * terminates when count reaches zero; a link-up timeout was
	 * therefore silently ignored and initialization continued without
	 * a link.  Report the timeout after the loop instead.
	 */
	for (count = 20; count; count--) {
		rk3568_pcie_get_link(dev, &status);
		if (status)
			break;
		DELAY(100000);
	}
	if (!status) {
		device_printf(dev, "Link up timeout!\n");
		return (ENXIO);
	}
	if
((err = pci_dw_init(dev))) return (ENXIO); /* Delay to have things settle */ DELAY(100000); /* Enable all MSG interrupts */ bus_write_4(sc->apb_res, PCIE_CLIENT_INTR_MASK_MSG_RX, 0x7fff0000); /* Enable all Legacy interrupts */ bus_write_4(sc->apb_res, PCIE_CLIENT_INTR_MASK_LEGACY, 0x00ff0000); /* Enable all Error interrupts */ bus_write_4(sc->apb_res, PCIE_CLIENT_INTR_MASK_ERR, 0x0fff0000); return (0); } static int rk3568_pcie_detach(device_t dev) { struct rk3568_pcie_softc *sc = device_get_softc(dev); /* Release allocated resources */ if (sc->irq_handle) bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); if (sc->phy) phy_release(sc->phy); if (sc->aux) clk_release(sc->aux); if (sc->pclk) clk_release(sc->pclk); if (sc->aclk_dbi) clk_release(sc->aclk_dbi); if (sc->aclk_slv) clk_release(sc->aclk_slv); if (sc->aclk_mst) clk_release(sc->aclk_mst); if (sc->hwreset) hwreset_release(sc->hwreset); if (sc->regulator) regulator_release(sc->regulator); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->dbi_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->dbi_rid, sc->dbi_res); if (sc->apb_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->apb_rid, sc->apb_res); return (0); } static int rk3568_pcie_attach(device_t dev) { struct rk3568_pcie_softc *sc = device_get_softc(dev); int error; sc->dev = dev; sc->node = ofw_bus_get_node(dev); /* Setup resources */ if ((error = ofw_bus_find_string_index(sc->node, "reg-names", "apb", &sc->apb_rid))) { device_printf(dev, "Cannot get APB memory: %d\n", error); goto fail; } if (!(sc->apb_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->apb_rid, RF_ACTIVE))) { device_printf(dev, "Cannot allocate APB resource\n"); goto fail; } if ((error = ofw_bus_find_string_index(sc->node, "reg-names", "dbi", &sc->dbi_rid))) { device_printf(dev, "Cannot get DBI memory: %d\n", error); goto fail; } if (!(sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->dbi_rid, RF_ACTIVE))) { 
device_printf(dev, "Cannot allocate DBI resource\n"); goto fail; } if (!(sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE))) { device_printf(dev, "Cannot allocate IRQ resource\n"); goto fail; } /* Get regulator if present */ if (regulator_get_by_ofw_property(dev, 0, "vpcie3v3-supply", &sc->regulator)) { device_printf(dev, "Cannot get regulator\n"); goto fail; } /* Get reset */ if (hwreset_get_by_ofw_name(dev, 0, "pipe", &sc->hwreset)) { device_printf(dev, "Can not get reset\n"); goto fail; } /* Get GPIO reset */ if (OF_hasprop(sc->node, "reset-gpios")) { if (gpio_pin_get_by_ofw_property(dev, sc->node, "reset-gpios", &sc->reset_gpio)) { device_printf(dev, "Cannot get reset-gpios\n"); goto fail; } gpio_pin_setflags(sc->reset_gpio, GPIO_PIN_OUTPUT); gpio_pin_set_active(sc->reset_gpio, true); } /* Get clocks */ if (clk_get_by_ofw_name(dev, 0, "aclk_mst", &sc->aclk_mst)) { device_printf(dev, "Can not get aclk_mst clk\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "aclk_slv", &sc->aclk_slv)) { device_printf(dev, "Can not get aclk_slv clk\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "aclk_dbi", &sc->aclk_dbi)) { device_printf(dev, "Can not get aclk_dbi clk\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk)) { device_printf(dev, "Can not get pclk clk\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "aux", &sc->aux)) { device_printf(dev, "Can not get aux clk\n"); goto fail; } /* Get PHY */ if (phy_get_by_ofw_name(dev, 0, "pcie-phy", &sc->phy)) { device_printf(dev, "Cannot get 'pcie-phy'\n"); goto fail; } if ((error = rk3568_pcie_init_soc(dev))) goto fail; /* Enable interrupt */ if ((bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk3568_intr, sc, &sc->irq_handle))) { device_printf(dev, "unable to setup interrupt\n"); goto fail; } return (bus_generic_attach(dev)); fail: rk3568_pcie_detach(dev); return (ENXIO); } static int rk3568_pcie_probe(device_t dev) { if 
(!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "RockChip RK3568 PCI-express controller"); return (BUS_PROBE_DEFAULT); } static device_method_t rk3568_pcie_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk3568_pcie_probe), DEVMETHOD(device_attach, rk3568_pcie_attach), DEVMETHOD(device_detach, rk3568_pcie_detach), /* PCI DW interface */ DEVMETHOD(pci_dw_get_link, rk3568_pcie_get_link), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, rk3568_pcie_driver, rk3568_pcie_methods, sizeof(struct rk3568_pcie_softc), pci_dw_driver); DRIVER_MODULE(rk3568_pcie, simplebus, rk3568_pcie_driver, NULL, NULL); diff --git a/sys/arm64/rockchip/rk3568_pciephy.c b/sys/arm64/rockchip/rk3568_pciephy.c index 91993bf69890..0f1aa5d280a8 100644 --- a/sys/arm64/rockchip/rk3568_pciephy.c +++ b/sys/arm64/rockchip/rk3568_pciephy.c @@ -1,263 +1,263 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, 2022 Soren Schmidt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "syscon_if.h" #include "phydev_if.h" #include "phynode_if.h" #define GRF_PCIE30PHY_CON1 0x04 #define GRF_PCIE30PHY_CON4 0x10 #define GRF_PCIE30PHY_CON5 0x14 #define GRF_PCIE30PHY_CON6 0x18 #define GRF_BIFURCATION_LANE_1 0 #define GRF_BIFURCATION_LANE_2 1 #define GRF_PCIE30PHY_WR_EN (0xf << 16) #define GRF_PCIE30PHY_CON9 0x24 #define GRF_PCIE30PHY_DA_OCM_MASK (1 << (15 + 16)) #define GRF_PCIE30PHY_DA_OCM ((1 << 15) | GRF_PCIE30PHY_DA_OCM_MASK) #define GRF_PCIE30PHY_STATUS0 0x80 #define SRAM_INIT_DONE (1 << 14) static struct ofw_compat_data compat_data[] = { {"rockchip,rk3568-pcie3-phy", 1}, {NULL, 0} }; struct rk3568_pciephy_softc { device_t dev; phandle_t node; struct resource *mem; struct phynode *phynode; struct syscon *phy_grf; clk_t refclk_m; clk_t refclk_n; clk_t pclk; hwreset_t phy_reset; }; static void rk3568_pciephy_bifurcate(device_t dev, int control, uint32_t lane) { struct rk3568_pciephy_softc *sc = device_get_softc(dev); switch (lane) { case 0: SYSCON_WRITE_4(sc->phy_grf, control, GRF_PCIE30PHY_WR_EN); return; case 1: SYSCON_WRITE_4(sc->phy_grf, control, GRF_PCIE30PHY_WR_EN | GRF_BIFURCATION_LANE_1); break; case 2: SYSCON_WRITE_4(sc->phy_grf, control, GRF_PCIE30PHY_WR_EN | GRF_BIFURCATION_LANE_2); break; default: device_printf(dev, "Illegal lane %d\n", lane); return; } 
if (bootverbose) device_printf(dev, "lane %d @ pcie3x%d\n", lane, (control == GRF_PCIE30PHY_CON5) ? 1 : 2); } /* PHY class and methods */ static int rk3568_pciephy_enable(struct phynode *phynode, bool enable) { device_t dev = phynode_get_device(phynode); struct rk3568_pciephy_softc *sc = device_get_softc(dev); int count; if (enable) { /* Pull PHY out of reset */ hwreset_deassert(sc->phy_reset); /* Poll for SRAM loaded and ready */ for (count = 100; count; count--) { if (SYSCON_READ_4(sc->phy_grf, GRF_PCIE30PHY_STATUS0) & SRAM_INIT_DONE) break; DELAY(10000); if (count == 0) { device_printf(dev, "SRAM init timeout!\n"); return (ENXIO); } } } return (0); } static phynode_method_t rk3568_pciephy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk3568_pciephy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(rk3568_pciephy_phynode, rk3568_pciephy_phynode_class, rk3568_pciephy_phynode_methods, 0, phynode_class); /* Device class and methods */ static int rk3568_pciephy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip PCIe PHY"); return (BUS_PROBE_DEFAULT); } static int rk3568_pciephy_attach(device_t dev) { struct rk3568_pciephy_softc *sc = device_get_softc(dev); struct phynode_init_def phy_init; struct phynode *phynode; uint32_t data_lanes[2] = { 0, 0 }; int rid = 0; sc->dev = dev; sc->node = ofw_bus_get_node(dev); /* Get memory resource */ if (!(sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) { device_printf(dev, "Cannot allocate memory resources\n"); return (ENXIO); } /* Get syncons handle */ if (OF_hasprop(sc->node, "rockchip,phy-grf") && syscon_get_by_ofw_property(dev, sc->node, "rockchip,phy-grf", &sc->phy_grf)) return (ENXIO); /* Get & enable clocks */ if (clk_get_by_ofw_name(dev, 0, "refclk_m", &sc->refclk_m)) { device_printf(dev, "getting refclk_m failed\n"); return (ENXIO); } if (clk_enable(sc->refclk_m)) 
device_printf(dev, "enable refclk_m failed\n"); if (clk_get_by_ofw_name(dev, 0, "refclk_n", &sc->refclk_n)) { device_printf(dev, "getting refclk_n failed\n"); return (ENXIO); } if (clk_enable(sc->refclk_n)) device_printf(dev, "enable refclk_n failed\n"); if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk)) { device_printf(dev, "getting pclk failed\n"); return (ENXIO); } if (clk_enable(sc->pclk)) device_printf(dev, "enable pclk failed\n"); /* Get & assert reset */ if (hwreset_get_by_ofw_idx(dev, sc->node, 0, &sc->phy_reset)) { device_printf(dev, "Cannot get reset\n"); } else hwreset_assert(sc->phy_reset); /* Set RC/EP mode not implemented yet (RC mode only) */ /* Set bifurcation according to "data-lanes" entry */ if (OF_hasprop(sc->node, "data-lanes")) { OF_getencprop(sc->node, "data-lanes", data_lanes, sizeof(data_lanes)); } else if (bootverbose) device_printf(dev, "lane 1 & 2 @pcie3x2\n"); /* Deassert PCIe PMA output clamp mode */ SYSCON_WRITE_4(sc->phy_grf, GRF_PCIE30PHY_CON9, GRF_PCIE30PHY_DA_OCM); /* Configure PHY HW accordingly */ rk3568_pciephy_bifurcate(dev, GRF_PCIE30PHY_CON5, data_lanes[0]); rk3568_pciephy_bifurcate(dev, GRF_PCIE30PHY_CON6, data_lanes[1]); if (data_lanes[0] || data_lanes[1]) SYSCON_WRITE_4(sc->phy_grf, GRF_PCIE30PHY_CON1, GRF_PCIE30PHY_DA_OCM); else SYSCON_WRITE_4(sc->phy_grf, GRF_PCIE30PHY_CON1, GRF_PCIE30PHY_DA_OCM_MASK); bzero(&phy_init, sizeof(phy_init)); phy_init.id = PHY_NONE; phy_init.ofw_node = sc->node; if (!(phynode = phynode_create(dev, &rk3568_pciephy_phynode_class, &phy_init))) { device_printf(dev, "failed to create pciephy PHY\n"); return (ENXIO); } if (!phynode_register(phynode)) { device_printf(dev, "failed to register pciephy PHY\n"); return (ENXIO); } sc->phynode = phynode; return (0); } static device_method_t rk3568_pciephy_methods[] = { DEVMETHOD(device_probe, rk3568_pciephy_probe), DEVMETHOD(device_attach, rk3568_pciephy_attach), DEVMETHOD_END }; DEFINE_CLASS_1(rk3568_pciephy, rk3568_pciephy_driver, 
rk3568_pciephy_methods, sizeof(struct simple_mfd_softc), simple_mfd_driver); EARLY_DRIVER_MODULE(rk3568_pciephy, simplebus, rk3568_pciephy_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_LATE); diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c index cc50b5ce302b..2d3b4a03fe19 100644 --- a/sys/arm64/rockchip/rk_gpio.c +++ b/sys/arm64/rockchip/rk_gpio.c @@ -1,819 +1,819 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * Copyright (c) 2021 Soren Schmidt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include "gpio_if.h" #include "pic_if.h" #include "fdt_pinctrl_if.h" enum gpio_regs { RK_GPIO_SWPORTA_DR = 1, /* Data register */ RK_GPIO_SWPORTA_DDR, /* Data direction register */ RK_GPIO_INTEN, /* Interrupt enable register */ RK_GPIO_INTMASK, /* Interrupt mask register */ RK_GPIO_INTTYPE_LEVEL, /* Interrupt level register */ RK_GPIO_INTTYPE_BOTH, /* Both rise and falling edge */ RK_GPIO_INT_POLARITY, /* Interrupt polarity register */ RK_GPIO_INT_STATUS, /* Interrupt status register */ RK_GPIO_INT_RAWSTATUS, /* Raw Interrupt status register */ RK_GPIO_DEBOUNCE, /* Debounce enable register */ RK_GPIO_PORTA_EOI, /* Clear interrupt register */ RK_GPIO_EXT_PORTA, /* External port register */ RK_GPIO_REGNUM }; #define RK_GPIO_LS_SYNC 0x60 /* Level sensitive syncronization enable register */ #define RK_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN | GPIO_INTR_EDGE_BOTH | \ GPIO_INTR_EDGE_RISING | GPIO_INTR_EDGE_FALLING | \ GPIO_INTR_LEVEL_HIGH | GPIO_INTR_LEVEL_LOW) #define GPIO_FLAGS_PINCTRL GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN #define RK_GPIO_MAX_PINS 32 struct pin_cached { uint8_t is_gpio; uint32_t flags; }; struct rk_pin_irqsrc { struct intr_irqsrc isrc; uint32_t irq; uint32_t mode; }; struct rk_gpio_softc { device_t sc_dev; device_t sc_busdev; struct mtx sc_mtx; struct resource *sc_res[2]; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; clk_t clk; device_t pinctrl; uint32_t swporta; uint32_t swporta_ddr; uint32_t version; struct pin_cached pin_cached[RK_GPIO_MAX_PINS]; uint8_t regs[RK_GPIO_REGNUM]; void *ihandle; struct rk_pin_irqsrc isrcs[RK_GPIO_MAX_PINS]; }; static struct ofw_compat_data compat_data[] = { {"rockchip,gpio-bank", 1}, {NULL, 0} }; static struct resource_spec rk_gpio_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, 
RF_ACTIVE }, { -1, 0 } }; #define RK_GPIO_VERSION 0x78 #define RK_GPIO_TYPE_V1 0x00000000 #define RK_GPIO_TYPE_V2 0x01000c2b #define RK_GPIO_ISRC(sc, irq) (&(sc->isrcs[irq].isrc)) static int rk_gpio_detach(device_t dev); #define RK_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx) #define RK_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx) #define RK_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define RK_GPIO_WRITE(_sc, _off, _val) \ bus_space_write_4(_sc->sc_bst, _sc->sc_bsh, _off, _val) #define RK_GPIO_READ(_sc, _off) \ bus_space_read_4(_sc->sc_bst, _sc->sc_bsh, _off) static int rk_gpio_read_bit(struct rk_gpio_softc *sc, int reg, int bit) { int offset = sc->regs[reg]; uint32_t value; if (sc->version == RK_GPIO_TYPE_V1) { value = RK_GPIO_READ(sc, offset); value >>= bit; } else { value = RK_GPIO_READ(sc, bit > 15 ? offset + 4 : offset); value >>= (bit % 16); } return (value & 1); } static void rk_gpio_write_bit(struct rk_gpio_softc *sc, int reg, int bit, int data) { int offset = sc->regs[reg]; uint32_t value; if (sc->version == RK_GPIO_TYPE_V1) { value = RK_GPIO_READ(sc, offset); if (data) value |= (1 << bit); else value &= ~(1 << bit); RK_GPIO_WRITE(sc, offset, value); } else { if (data) value = (1 << (bit % 16)); else value = 0; value |= (1 << ((bit % 16) + 16)); RK_GPIO_WRITE(sc, bit > 15 ? 
offset + 4 : offset, value); } } static uint32_t rk_gpio_read_4(struct rk_gpio_softc *sc, int reg) { int offset = sc->regs[reg]; uint32_t value; if (sc->version == RK_GPIO_TYPE_V1) value = RK_GPIO_READ(sc, offset); else value = (RK_GPIO_READ(sc, offset) & 0xffff) | (RK_GPIO_READ(sc, offset + 4) << 16); return (value); } static void rk_gpio_write_4(struct rk_gpio_softc *sc, int reg, uint32_t value) { int offset = sc->regs[reg]; if (sc->version == RK_GPIO_TYPE_V1) RK_GPIO_WRITE(sc, offset, value); else { RK_GPIO_WRITE(sc, offset, (value & 0xffff) | 0xffff0000); RK_GPIO_WRITE(sc, offset + 4, (value >> 16) | 0xffff0000); } } static int rk_gpio_intr(void *arg) { struct rk_gpio_softc *sc = (struct rk_gpio_softc *)arg;; struct trapframe *tf = curthread->td_intr_frame; uint32_t status; RK_GPIO_LOCK(sc); status = rk_gpio_read_4(sc, RK_GPIO_INT_STATUS); rk_gpio_write_4(sc, RK_GPIO_PORTA_EOI, status); RK_GPIO_UNLOCK(sc); while (status) { int pin = ffs(status) - 1; status &= ~(1 << pin); if (intr_isrc_dispatch(RK_GPIO_ISRC(sc, pin), tf)) { device_printf(sc->sc_dev, "Interrupt pin=%d unhandled\n", pin); continue; } if ((sc->version == RK_GPIO_TYPE_V1) && (sc->isrcs[pin].mode & GPIO_INTR_EDGE_BOTH)) { RK_GPIO_LOCK(sc); if (rk_gpio_read_bit(sc, RK_GPIO_EXT_PORTA, pin)) rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, (1 << pin), 0); else rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, (1 << pin), 1); RK_GPIO_UNLOCK(sc); } } return (FILTER_HANDLED); } static int rk_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip GPIO Bank controller"); return (BUS_PROBE_DEFAULT); } static int rk_gpio_attach(device_t dev) { struct rk_gpio_softc *sc; phandle_t parent_node, node; int err, i; sc = device_get_softc(dev); sc->sc_dev = dev; sc->pinctrl = device_get_parent(dev); parent_node = ofw_bus_get_node(sc->pinctrl); node = ofw_bus_get_node(sc->sc_dev); if 
(!OF_hasprop(node, "gpio-controller")) return (ENXIO); mtx_init(&sc->sc_mtx, "rk gpio", "gpio", MTX_SPIN); if (bus_alloc_resources(dev, rk_gpio_spec, sc->sc_res)) { device_printf(dev, "could not allocate resources\n"); bus_release_resources(dev, rk_gpio_spec, sc->sc_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res[0]); sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]); if (clk_get_by_ofw_index(dev, 0, 0, &sc->clk) != 0) { device_printf(dev, "Cannot get clock\n"); rk_gpio_detach(dev); return (ENXIO); } err = clk_enable(sc->clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk)); rk_gpio_detach(dev); return (ENXIO); } if ((err = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE, rk_gpio_intr, NULL, sc, &sc->ihandle))) { device_printf(dev, "Can not setup IRQ\n"); rk_gpio_detach(dev); return (ENXIO); } /* * RK3568 has GPIO_VER_ID register, however both * RK3328 and RK3399 doesn't have. So choose the * version based on parent's compat string. 
*/ if (ofw_bus_node_is_compatible(parent_node, "rockchip,rk3568-pinctrl")) sc->version = RK_GPIO_TYPE_V2; else sc->version = RK_GPIO_TYPE_V1; switch (sc->version) { case RK_GPIO_TYPE_V1: sc->regs[RK_GPIO_SWPORTA_DR] = 0x00; sc->regs[RK_GPIO_SWPORTA_DDR] = 0x04; sc->regs[RK_GPIO_INTEN] = 0x30; sc->regs[RK_GPIO_INTMASK] = 0x34; sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x38; sc->regs[RK_GPIO_INT_POLARITY] = 0x3c; sc->regs[RK_GPIO_INT_STATUS] = 0x40; sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x44; sc->regs[RK_GPIO_DEBOUNCE] = 0x48; sc->regs[RK_GPIO_PORTA_EOI] = 0x4c; sc->regs[RK_GPIO_EXT_PORTA] = 0x50; break; case RK_GPIO_TYPE_V2: sc->regs[RK_GPIO_SWPORTA_DR] = 0x00; sc->regs[RK_GPIO_SWPORTA_DDR] = 0x08; sc->regs[RK_GPIO_INTEN] = 0x10; sc->regs[RK_GPIO_INTMASK] = 0x18; sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x20; sc->regs[RK_GPIO_INTTYPE_BOTH] = 0x30; sc->regs[RK_GPIO_INT_POLARITY] = 0x28; sc->regs[RK_GPIO_INT_STATUS] = 0x50; sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x58; sc->regs[RK_GPIO_DEBOUNCE] = 0x38; sc->regs[RK_GPIO_PORTA_EOI] = 0x60; sc->regs[RK_GPIO_EXT_PORTA] = 0x70; break; default: device_printf(dev, "Unknown gpio version %08x\n", sc->version); rk_gpio_detach(dev); return (ENXIO); } for (i = 0; i < RK_GPIO_MAX_PINS; i++) { sc->isrcs[i].irq = i; sc->isrcs[i].mode = GPIO_INTR_CONFORM; if ((err = intr_isrc_register(RK_GPIO_ISRC(sc, i), dev, 0, "%s", device_get_nameunit(dev)))) { device_printf(dev, "Can not register isrc %d\n", err); rk_gpio_detach(dev); return (ENXIO); } } if (intr_pic_register(dev, OF_xref_from_node(node)) == NULL) { device_printf(dev, "Can not register pic\n"); rk_gpio_detach(dev); return (ENXIO); } sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) { rk_gpio_detach(dev); return (ENXIO); } /* Set the cached value to unknown */ for (i = 0; i < RK_GPIO_MAX_PINS; i++) sc->pin_cached[i].is_gpio = 2; RK_GPIO_LOCK(sc); sc->swporta = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DR); sc->swporta_ddr = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DDR); RK_GPIO_UNLOCK(sc); return 
(0); } static int rk_gpio_detach(device_t dev) { struct rk_gpio_softc *sc; sc = device_get_softc(dev); if (sc->sc_busdev) gpiobus_detach_bus(dev); bus_release_resources(dev, rk_gpio_spec, sc->sc_res); mtx_destroy(&sc->sc_mtx); clk_disable(sc->clk); return(0); } static device_t rk_gpio_get_bus(device_t dev) { struct rk_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int rk_gpio_pin_max(device_t dev, int *maxpin) { /* Each bank have always 32 pins */ /* XXX not true*/ *maxpin = 31; return (0); } static int rk_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct rk_gpio_softc *sc; uint32_t bank; sc = device_get_softc(dev); if (pin >= 32) return (EINVAL); bank = pin / 8; pin = pin - (bank * 8); RK_GPIO_LOCK(sc); snprintf(name, GPIOMAXNAME, "P%c%d", bank + 'A', pin); RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct rk_gpio_softc *sc; int rv; sc = device_get_softc(dev); if (__predict_false(sc->pin_cached[pin].is_gpio != 1)) { rv = FDT_PINCTRL_IS_GPIO(sc->pinctrl, dev, pin, (bool *)&sc->pin_cached[pin].is_gpio); if (rv != 0) return (rv); if (sc->pin_cached[pin].is_gpio == 0) return (EINVAL); } *flags = 0; rv = FDT_PINCTRL_GET_FLAGS(sc->pinctrl, dev, pin, flags); if (rv != 0) return (rv); sc->pin_cached[pin].flags = *flags; if (sc->swporta_ddr & (1 << pin)) *flags |= GPIO_PIN_OUTPUT; else *flags |= GPIO_PIN_INPUT; return (0); } static int rk_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { if (pin >= RK_GPIO_MAX_PINS) return EINVAL; *caps = RK_GPIO_DEFAULT_CAPS; return (0); } static int rk_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct rk_gpio_softc *sc; int rv; sc = device_get_softc(dev); if (pin >= RK_GPIO_MAX_PINS) return (EINVAL); if (__predict_false(sc->pin_cached[pin].is_gpio != 1)) { rv = FDT_PINCTRL_IS_GPIO(sc->pinctrl, dev, pin, (bool *)&sc->pin_cached[pin].is_gpio); if (rv != 0) return (rv); if 
(sc->pin_cached[pin].is_gpio == 0) return (EINVAL); } if (__predict_false((flags & GPIO_PIN_INPUT) && ((flags & GPIO_FLAGS_PINCTRL) != sc->pin_cached[pin].flags))) { rv = FDT_PINCTRL_SET_FLAGS(sc->pinctrl, dev, pin, flags); sc->pin_cached[pin].flags = flags & GPIO_FLAGS_PINCTRL; if (rv != 0) return (rv); } RK_GPIO_LOCK(sc); if (flags & GPIO_PIN_INPUT) sc->swporta_ddr &= ~(1 << pin); else if (flags & GPIO_PIN_OUTPUT) sc->swporta_ddr |= (1 << pin); rk_gpio_write_4(sc, RK_GPIO_SWPORTA_DDR, sc->swporta_ddr); RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct rk_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= RK_GPIO_MAX_PINS) return (EINVAL); RK_GPIO_LOCK(sc); *val = rk_gpio_read_bit(sc, RK_GPIO_EXT_PORTA, pin); RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct rk_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= RK_GPIO_MAX_PINS) return (EINVAL); RK_GPIO_LOCK(sc); if (value) sc->swporta |= (1 << pin); else sc->swporta &= ~(1 << pin); rk_gpio_write_4(sc, RK_GPIO_SWPORTA_DR, sc->swporta); RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_pin_toggle(device_t dev, uint32_t pin) { struct rk_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= RK_GPIO_MAX_PINS) return (EINVAL); RK_GPIO_LOCK(sc); if (sc->swporta & (1 << pin)) sc->swporta &= ~(1 << pin); else sc->swporta |= (1 << pin); rk_gpio_write_4(sc, RK_GPIO_SWPORTA_DR, sc->swporta); RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins) { struct rk_gpio_softc *sc; uint32_t reg; sc = device_get_softc(dev); RK_GPIO_LOCK(sc); reg = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DR); if (orig_pins) *orig_pins = reg; sc->swporta = reg; if ((clear_pins | change_pins) != 0) { reg = (reg & ~clear_pins) ^ change_pins; rk_gpio_write_4(sc, RK_GPIO_SWPORTA_DR, reg); } RK_GPIO_UNLOCK(sc); 
return (0); } static int rk_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins, uint32_t *pin_flags) { struct rk_gpio_softc *sc; uint32_t reg, set, mask, flags; int i; sc = device_get_softc(dev); if (first_pin != 0 || num_pins > 32) return (EINVAL); set = 0; mask = 0; for (i = 0; i < num_pins; i++) { mask = (mask << 1) | 1; flags = pin_flags[i]; if (flags & GPIO_PIN_INPUT) { set &= ~(1 << i); } else if (flags & GPIO_PIN_OUTPUT) { set |= (1 << i); } } RK_GPIO_LOCK(sc); reg = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DDR); reg &= ~mask; reg |= set; rk_gpio_write_4(sc, RK_GPIO_SWPORTA_DDR, reg); sc->swporta_ddr = reg; RK_GPIO_UNLOCK(sc); return (0); } static int rk_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { /* The gpios are mapped as */ *pin = gpios[0]; *flags = gpios[1]; return (0); } static phandle_t rk_gpio_get_node(device_t bus, device_t dev) { /* We only have one child, the GPIO bus, which needs our own node. 
	 */
	return (ofw_bus_get_node(bus));
}

/*
 * Map a GPIO interrupt specifier to our interrupt source.  Only
 * INTR_MAP_DATA_GPIO specifiers are accepted, and the pin number is
 * bounds-checked here so later PIC methods can trust it.
 */
static int
rk_pic_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct rk_gpio_softc *sc = device_get_softc(dev);
	struct intr_map_data_gpio *gdata;
	uint32_t irq;

	if (data->type != INTR_MAP_DATA_GPIO) {
		device_printf(dev, "Wrong type\n");
		return (ENOTSUP);
	}
	gdata = (struct intr_map_data_gpio *)data;
	irq = gdata->gpio_pin_num;
	if (irq >= RK_GPIO_MAX_PINS) {
		device_printf(dev, "Invalid interrupt %u\n", irq);
		return (EINVAL);
	}
	*isrcp = RK_GPIO_ISRC(sc, irq);
	return (0);
}

/*
 * Program trigger mode for one pin interrupt.  Every mode first forces
 * the pin to input, then sets level/edge and polarity; for edge-both on
 * v1 hardware (which has no both-edge mode) the initial polarity is set
 * opposite to the current pin level and rk_gpio_intr() flips it after
 * each interrupt.  Finally the pin is debounced, unmasked and enabled.
 */
static int
rk_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct rk_gpio_softc *sc = device_get_softc(dev);
	struct rk_pin_irqsrc *rkisrc = (struct rk_pin_irqsrc *)isrc;
	struct intr_map_data_gpio *gdata;
	uint32_t mode;
	uint8_t pin;

	if (!data) {
		device_printf(dev, "No map data\n");
		return (ENOTSUP);
	}
	gdata = (struct intr_map_data_gpio *)data;
	mode = gdata->gpio_intr_mode;
	pin = gdata->gpio_pin_num;

	if (rkisrc->irq != gdata->gpio_pin_num) {
		device_printf(dev, "Interrupts don't match\n");
		return (EINVAL);
	}

	if (isrc->isrc_handlers != 0) {
		/* Shared handler: only an identical mode is acceptable. */
		device_printf(dev, "Handler already attached\n");
		return (rkisrc->mode == mode ? 0 : EINVAL);
	}
	rkisrc->mode = mode;

	RK_GPIO_LOCK(sc);

	switch (mode & GPIO_INTR_MASK) {
	case GPIO_INTR_EDGE_RISING:
		rk_gpio_write_bit(sc, RK_GPIO_SWPORTA_DDR, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_LEVEL, pin, 1);
		rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, pin, 1);
		break;
	case GPIO_INTR_EDGE_FALLING:
		rk_gpio_write_bit(sc, RK_GPIO_SWPORTA_DDR, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_LEVEL, pin, 1);
		rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, pin, 0);
		break;
	case GPIO_INTR_EDGE_BOTH:
		rk_gpio_write_bit(sc, RK_GPIO_SWPORTA_DDR, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_LEVEL, pin, 1);
		if (sc->version == RK_GPIO_TYPE_V1) {
			/* Arm for the edge opposite the current level. */
			if (rk_gpio_read_bit(sc, RK_GPIO_EXT_PORTA,
			    pin))
				rk_gpio_write_bit(sc,
				    RK_GPIO_INT_POLARITY, pin, 0);
			else
				rk_gpio_write_bit(sc,
				    RK_GPIO_INT_POLARITY, pin, 1);
		} else
			rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_BOTH,
			    pin, 1);
		break;
	case GPIO_INTR_LEVEL_HIGH:
		rk_gpio_write_bit(sc, RK_GPIO_SWPORTA_DDR, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_LEVEL, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, pin, 1);
		break;
	case GPIO_INTR_LEVEL_LOW:
		rk_gpio_write_bit(sc, RK_GPIO_SWPORTA_DDR, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTTYPE_LEVEL, pin, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INT_POLARITY, pin, 0);
		break;
	default:
		/* Unsupported mode: mask and disable before bailing out. */
		rk_gpio_write_bit(sc, RK_GPIO_INTMASK, pin, 1);
		rk_gpio_write_bit(sc, RK_GPIO_INTEN, pin, 0);
		RK_GPIO_UNLOCK(sc);
		return (EINVAL);
	}

	rk_gpio_write_bit(sc, RK_GPIO_DEBOUNCE, pin, 1);
	rk_gpio_write_bit(sc, RK_GPIO_INTMASK, pin, 0);
	rk_gpio_write_bit(sc, RK_GPIO_INTEN, pin, 1);
	RK_GPIO_UNLOCK(sc);

	return (0);
}

/*
 * Undo rk_pic_setup_intr() once the last handler is gone: disable and
 * un-debounce the pin interrupt.
 *
 * NOTE(review): INTMASK is written to 0 (unmasked) here, not 1; since
 * INTEN is cleared first this is probably harmless, but masking (1)
 * would look more symmetric with the setup failure path — confirm
 * against the TRM.
 */
static int
rk_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct rk_gpio_softc *sc = device_get_softc(dev);
	struct rk_pin_irqsrc *irqsrc;

	irqsrc = (struct rk_pin_irqsrc *)isrc;

	if (isrc->isrc_handlers == 0) {
		irqsrc->mode = GPIO_INTR_CONFORM;
		RK_GPIO_LOCK(sc);
		rk_gpio_write_bit(sc, RK_GPIO_INTEN, irqsrc->irq, 0);
		rk_gpio_write_bit(sc, RK_GPIO_INTMASK, irqsrc->irq, 0);
		rk_gpio_write_bit(sc, RK_GPIO_DEBOUNCE, irqsrc->irq, 0);
		RK_GPIO_UNLOCK(sc);
	}
	return (0);
}

static device_method_t rk_gpio_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rk_gpio_probe),
	DEVMETHOD(device_attach,	rk_gpio_attach),
	DEVMETHOD(device_detach,	rk_gpio_detach),

	/* GPIO protocol */
	DEVMETHOD(gpio_get_bus,		rk_gpio_get_bus),
	DEVMETHOD(gpio_pin_max,		rk_gpio_pin_max),
	DEVMETHOD(gpio_pin_getname,	rk_gpio_pin_getname),
	DEVMETHOD(gpio_pin_getflags,	rk_gpio_pin_getflags),
	DEVMETHOD(gpio_pin_getcaps,	rk_gpio_pin_getcaps),
	DEVMETHOD(gpio_pin_setflags,	rk_gpio_pin_setflags),
	DEVMETHOD(gpio_pin_get,		rk_gpio_pin_get),
	DEVMETHOD(gpio_pin_set,		rk_gpio_pin_set),
	DEVMETHOD(gpio_pin_toggle,	rk_gpio_pin_toggle),
	DEVMETHOD(gpio_pin_access_32,	rk_gpio_pin_access_32),
	DEVMETHOD(gpio_pin_config_32,	rk_gpio_pin_config_32),
	DEVMETHOD(gpio_map_gpios,	rk_gpio_map_gpios),

	/* Interrupt controller interface */
	DEVMETHOD(pic_map_intr,		rk_pic_map_intr),
	DEVMETHOD(pic_setup_intr,	rk_pic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	rk_pic_teardown_intr),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_node,	rk_gpio_get_node),

	DEVMETHOD_END
};

static driver_t rk_gpio_driver = {
	"gpio",
	rk_gpio_methods,
	sizeof(struct rk_gpio_softc),
};

/*
 * GPIO driver is always a child of rk_pinctrl driver and should be probed
 * and attached within rk_pinctrl_attach function. Due to this, bus pass order
 * must be same as bus pass order of rk_pinctrl driver.
*/ EARLY_DRIVER_MODULE(rk_gpio, simplebus, rk_gpio_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm64/rockchip/rk_i2s.c b/sys/arm64/rockchip/rk_i2s.c index 049f27505261..fb1c23b80eb9 100644 --- a/sys/arm64/rockchip/rk_i2s.c +++ b/sys/arm64/rockchip/rk_i2s.c @@ -1,651 +1,651 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "syscon_if.h" #include "opt_snd.h" #include #include #include "audio_dai_if.h" #define AUDIO_BUFFER_SIZE 48000 * 4 #define I2S_TXCR 0x0000 #define I2S_CSR_2 (0 << 15) #define I2S_CSR_4 (1 << 15) #define I2S_CSR_6 (2 << 15) #define I2S_CSR_8 (3 << 15) #define I2S_TXCR_IBM_NORMAL (0 << 9) #define I2S_TXCR_IBM_LJ (1 << 9) #define I2S_TXCR_IBM_RJ (2 << 9) #define I2S_TXCR_PBM_NODELAY (0 << 7) #define I2S_TXCR_PBM_1 (1 << 7) #define I2S_TXCR_PBM_2 (2 << 7) #define I2S_TXCR_PBM_3 (3 << 7) #define I2S_TXCR_TFS_I2S (0 << 5) #define I2S_TXCR_TFS_PCM (1 << 5) #define I2S_TXCR_VDW_16 (0xf << 0) #define I2S_RXCR 0x0004 #define I2S_RXCR_IBM_NORMAL (0 << 9) #define I2S_RXCR_IBM_LJ (1 << 9) #define I2S_RXCR_IBM_RJ (2 << 9) #define I2S_RXCR_PBM_NODELAY (0 << 7) #define I2S_RXCR_PBM_1 (1 << 7) #define I2S_RXCR_PBM_2 (2 << 7) #define I2S_RXCR_PBM_3 (3 << 7) #define I2S_RXCR_TFS_I2S (0 << 5) #define I2S_RXCR_TFS_PCM (1 << 5) #define I2S_RXCR_VDW_16 (0xf << 0) #define I2S_CKR 0x0008 #define I2S_CKR_MSS_MASK (1 << 27) #define I2S_CKR_MSS_MASTER (0 << 27) #define I2S_CKR_MSS_SLAVE (1 << 27) #define I2S_CKR_CKP (1 << 26) #define I2S_CKR_MDIV(n) (((n) - 1) << 16) #define I2S_CKR_MDIV_MASK (0xff << 16) #define I2S_CKR_RSD(n) (((n) - 1) << 8) #define I2S_CKR_RSD_MASK (0xff << 8) #define I2S_CKR_TSD(n) (((n) - 1) << 0) #define I2S_CKR_TSD_MASK (0xff << 0) #define I2S_TXFIFOLR 0x000c #define TXFIFO0LR_MASK 0x3f #define I2S_DMACR 0x0010 #define I2S_DMACR_RDE_ENABLE (1 << 24) #define I2S_DMACR_RDL(n) ((n) << 16) #define I2S_DMACR_TDE_ENABLE (1 << 8) #define I2S_DMACR_TDL(n) ((n) << 0) #define I2S_INTCR 0x0014 #define I2S_INTCR_RFT(n) (((n) - 1) << 20) #define I2S_INTCR_TFT(n) (((n) - 1) << 4) #define I2S_INTCR_RXFIE (1 << 16) #define I2S_INTCR_TXUIC (1 << 2) #define I2S_INTCR_TXEIE (1 << 0) #define I2S_INTSR 0x0018 #define I2S_INTSR_RXFI 
							(1 << 16)
#define	I2S_INTSR_TXUI		(1 << 1)
#define	I2S_INTSR_TXEI		(1 << 0)
#define	I2S_XFER		0x001c
#define	I2S_XFER_RXS_START	(1 << 1)
#define	I2S_XFER_TXS_START	(1 << 0)
#define	I2S_CLR			0x0020
#define	I2S_CLR_RXC		(1 << 1)
#define	I2S_CLR_TXC		(1 << 0)
#define	I2S_TXDR		0x0024
#define	I2S_RXDR		0x0028
#define	I2S_RXFIFOLR		0x002c
#define	RXFIFO0LR_MASK		0x3f

/* syscon */
#define	GRF_SOC_CON8		0xe220
#define	I2S_IO_DIRECTION_MASK	7
#define	I2S_IO_DIRECTION_SHIFT	11
#define	I2S_IO_8CH_OUT_2CH_IN	0
#define	I2S_IO_6CH_OUT_4CH_IN	4
#define	I2S_IO_4CH_OUT_6CH_IN	6
#define	I2S_IO_2CH_OUT_8CH_IN	7

#define	DIV_ROUND_CLOSEST(n,d)	(((n) + (d) / 2) / (d))

#define	RK_I2S_SAMPLING_RATE	48000
#define	FIFO_SIZE		32

static struct ofw_compat_data compat_data[] = {
	{ "rockchip,rk3066-i2s",	1 },
	{ "rockchip,rk3399-i2s",	1 },
	{ NULL,				0 }
};

static struct resource_spec rk_i2s_spec[] = {
	{ SYS_RES_MEMORY,	0, RF_ACTIVE },
	{ SYS_RES_IRQ,		0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/* Per-device state; mtx guards the FIFO pointers touched from the ISR. */
struct rk_i2s_softc {
	device_t	dev;
	struct resource	*res[2];
	struct mtx	mtx;
	clk_t		clk;
	clk_t		hclk;
	void *		intrhand;
	struct syscon	*grf;
	/* pointers to playback/capture buffers */
	uint32_t	play_ptr;
	uint32_t	rec_ptr;
};

#define	RK_I2S_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	RK_I2S_UNLOCK(sc)	mtx_unlock(&(sc)->mtx)
#define	RK_I2S_READ_4(sc, reg)		bus_read_4((sc)->res[0], (reg))
#define	RK_I2S_WRITE_4(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))

static int rk_i2s_probe(device_t dev);
static int rk_i2s_attach(device_t dev);
static int rk_i2s_detach(device_t dev);

static uint32_t sc_fmt[] = {
	SND_FORMAT(AFMT_S16_LE, 2, 0),
	0
};
static struct pcmchan_caps rk_i2s_caps = {RK_I2S_SAMPLING_RATE,
    RK_I2S_SAMPLING_RATE, sc_fmt, 0};

/*
 * One-time controller init: set the bit clock to 256 * fs, program the
 * FIFO interrupt thresholds to half depth, and (rk3399 only) route the
 * I2S I/O pins as 2ch out / 8ch in through the GRF syscon.
 */
static int
rk_i2s_init(struct rk_i2s_softc *sc)
{
	uint32_t val;
	int error;

	clk_set_freq(sc->clk, RK_I2S_SAMPLING_RATE * 256,
	    CLK_SET_ROUND_DOWN);
	error = clk_enable(sc->clk);
	if (error != 0) {
		device_printf(sc->dev, "cannot enable i2s_clk clock\n");
		return (ENXIO);
	}

	val = I2S_INTCR_TFT(FIFO_SIZE/2);
	val |= I2S_INTCR_RFT(FIFO_SIZE/2);
	RK_I2S_WRITE_4(sc, I2S_INTCR, val);

	if (sc->grf && ofw_bus_is_compatible(sc->dev, "rockchip,rk3399-i2s")) {
		/* Upper 16 bits of the GRF register are a write-enable mask. */
		val = (I2S_IO_2CH_OUT_8CH_IN << I2S_IO_DIRECTION_SHIFT);
		val |= (I2S_IO_DIRECTION_MASK << I2S_IO_DIRECTION_SHIFT) << 16;
		SYSCON_WRITE_4(sc->grf, GRF_SOC_CON8, val);

#if 0
		// HACK: enable IO domain
		val = (1 << 1);
		val |= (1 << 1) << 16;
		SYSCON_WRITE_4(sc->grf, 0xe640, val);
#endif
	}

	RK_I2S_WRITE_4(sc, I2S_XFER, 0);

	return (0);
}

static int
rk_i2s_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Rockchip I2S");
	return (BUS_PROBE_DEFAULT);
}

static int
rk_i2s_attach(device_t dev)
{
	struct rk_i2s_softc *sc;
	int error;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	if (bus_alloc_resources(dev, rk_i2s_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	error = clk_get_by_ofw_name(dev, 0, "i2s_hclk", &sc->hclk);
	if (error != 0) {
		device_printf(dev, "cannot get i2s_hclk clock\n");
		goto fail;
	}

	error = clk_get_by_ofw_name(dev, 0, "i2s_clk", &sc->clk);
	if (error != 0) {
		device_printf(dev, "cannot get i2s_clk clock\n");
		goto fail;
	}

	/* Activate the module clock.
*/ error = clk_enable(sc->hclk); if (error != 0) { device_printf(dev, "cannot enable i2s_hclk clock\n"); goto fail; } node = ofw_bus_get_node(dev); if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf) != 0) { device_printf(dev, "cannot get grf driver handle\n"); return (ENXIO); } rk_i2s_init(sc); OF_device_register_xref(OF_xref_from_node(node), dev); return (0); fail: rk_i2s_detach(dev); return (error); } static int rk_i2s_detach(device_t dev) { struct rk_i2s_softc *i2s; i2s = device_get_softc(dev); if (i2s->hclk != NULL) clk_release(i2s->hclk); if (i2s->clk) clk_release(i2s->clk); if (i2s->intrhand != NULL) bus_teardown_intr(i2s->dev, i2s->res[1], i2s->intrhand); bus_release_resources(dev, rk_i2s_spec, i2s->res); mtx_destroy(&i2s->mtx); return (0); } static int rk_i2s_dai_init(device_t dev, uint32_t format) { uint32_t val, txcr, rxcr; struct rk_i2s_softc *sc; int fmt, pol, clk; sc = device_get_softc(dev); fmt = AUDIO_DAI_FORMAT_FORMAT(format); pol = AUDIO_DAI_FORMAT_POLARITY(format); clk = AUDIO_DAI_FORMAT_CLOCK(format); /* Set format */ val = RK_I2S_READ_4(sc, I2S_CKR); val &= ~(I2S_CKR_MSS_MASK); switch (clk) { case AUDIO_DAI_CLOCK_CBM_CFM: val |= I2S_CKR_MSS_MASTER; break; case AUDIO_DAI_CLOCK_CBS_CFS: val |= I2S_CKR_MSS_SLAVE; break; default: return (EINVAL); } switch (pol) { case AUDIO_DAI_POLARITY_IB_NF: val |= I2S_CKR_CKP; break; case AUDIO_DAI_POLARITY_NB_NF: val &= ~I2S_CKR_CKP; break; default: return (EINVAL); } RK_I2S_WRITE_4(sc, I2S_CKR, val); txcr = I2S_TXCR_VDW_16 | I2S_CSR_2; rxcr = I2S_RXCR_VDW_16 | I2S_CSR_2; switch (fmt) { case AUDIO_DAI_FORMAT_I2S: txcr |= I2S_TXCR_IBM_NORMAL; rxcr |= I2S_RXCR_IBM_NORMAL; break; case AUDIO_DAI_FORMAT_LJ: txcr |= I2S_TXCR_IBM_LJ; rxcr |= I2S_RXCR_IBM_LJ; break; case AUDIO_DAI_FORMAT_RJ: txcr |= I2S_TXCR_IBM_RJ; rxcr |= I2S_RXCR_IBM_RJ; break; case AUDIO_DAI_FORMAT_DSPA: txcr |= I2S_TXCR_TFS_PCM; rxcr |= I2S_RXCR_TFS_PCM; txcr |= I2S_TXCR_PBM_1; rxcr |= 
I2S_RXCR_PBM_1; break; case AUDIO_DAI_FORMAT_DSPB: txcr |= I2S_TXCR_TFS_PCM; rxcr |= I2S_RXCR_TFS_PCM; txcr |= I2S_TXCR_PBM_2; rxcr |= I2S_RXCR_PBM_2; break; default: return EINVAL; } RK_I2S_WRITE_4(sc, I2S_TXCR, txcr); RK_I2S_WRITE_4(sc, I2S_RXCR, rxcr); RK_I2S_WRITE_4(sc, I2S_XFER, 0); return (0); } static int rk_i2s_dai_intr(device_t dev, struct snd_dbuf *play_buf, struct snd_dbuf *rec_buf) { struct rk_i2s_softc *sc; uint32_t status; uint32_t level; uint32_t val = 0x00; int ret = 0; sc = device_get_softc(dev); RK_I2S_LOCK(sc); status = RK_I2S_READ_4(sc, I2S_INTSR); if (status & I2S_INTSR_TXEI) { level = RK_I2S_READ_4(sc, I2S_TXFIFOLR) & TXFIFO0LR_MASK; uint8_t *samples; uint32_t count, size, readyptr, written; count = sndbuf_getready(play_buf); if (count > FIFO_SIZE - 1) count = FIFO_SIZE - 1; size = sndbuf_getsize(play_buf); readyptr = sndbuf_getreadyptr(play_buf); samples = (uint8_t*)sndbuf_getbuf(play_buf); written = 0; for (; level < count; level++) { val = (samples[readyptr++ % size] << 0); val |= (samples[readyptr++ % size] << 8); val |= (samples[readyptr++ % size] << 16); val |= (samples[readyptr++ % size] << 24); written += 4; RK_I2S_WRITE_4(sc, I2S_TXDR, val); } sc->play_ptr += written; sc->play_ptr %= size; ret |= AUDIO_DAI_PLAY_INTR; } if (status & I2S_INTSR_RXFI) { level = RK_I2S_READ_4(sc, I2S_RXFIFOLR) & RXFIFO0LR_MASK; uint8_t *samples; uint32_t count, size, freeptr, recorded; count = sndbuf_getfree(rec_buf); size = sndbuf_getsize(rec_buf); freeptr = sndbuf_getfreeptr(rec_buf); samples = (uint8_t*)sndbuf_getbuf(rec_buf); recorded = 0; if (level > count / 4) level = count / 4; for (; level > 0; level--) { val = RK_I2S_READ_4(sc, I2S_RXDR); samples[freeptr++ % size] = val & 0xff; samples[freeptr++ % size] = (val >> 8) & 0xff; samples[freeptr++ % size] = (val >> 16) & 0xff; samples[freeptr++ % size] = (val >> 24) & 0xff; recorded += 4; } sc->rec_ptr += recorded; sc->rec_ptr %= size; ret |= AUDIO_DAI_REC_INTR; } RK_I2S_UNLOCK(sc); return (ret); } 
static struct pcmchan_caps *
rk_i2s_dai_get_caps(device_t dev)
{
	return (&rk_i2s_caps);
}

/*
 * Start or stop a transfer direction.  START enables the per-direction
 * FIFO interrupt and kicks both TX and RX state machines; STOP/ABORT
 * disables the interrupt and, once both directions are idle, halts the
 * transfer unit and busy-waits for the FIFO clear bit to self-clear
 * before resetting the software buffer pointer.
 */
static int
rk_i2s_dai_trigger(device_t dev, int go, int pcm_dir)
{
	struct rk_i2s_softc 	*sc = device_get_softc(dev);
	uint32_t val;
	uint32_t clear_bit;

	if ((pcm_dir != PCMDIR_PLAY) && (pcm_dir != PCMDIR_REC))
		return (EINVAL);

	switch (go) {
	case PCMTRIG_START:
		val = RK_I2S_READ_4(sc, I2S_INTCR);
		if (pcm_dir == PCMDIR_PLAY)
			val |= I2S_INTCR_TXEIE;
		else if (pcm_dir == PCMDIR_REC)
			val |= I2S_INTCR_RXFIE;
		RK_I2S_WRITE_4(sc, I2S_INTCR, val);

		val = I2S_XFER_TXS_START | I2S_XFER_RXS_START;
		RK_I2S_WRITE_4(sc, I2S_XFER, val);
		break;

	case PCMTRIG_STOP:
	case PCMTRIG_ABORT:
		val = RK_I2S_READ_4(sc, I2S_INTCR);
		if (pcm_dir == PCMDIR_PLAY)
			val &= ~I2S_INTCR_TXEIE;
		else if (pcm_dir == PCMDIR_REC)
			val &= ~I2S_INTCR_RXFIE;
		RK_I2S_WRITE_4(sc, I2S_INTCR, val);

		/*
		 * If there is no other activity going on, stop transfers
		 */
		if ((val & (I2S_INTCR_TXEIE | I2S_INTCR_RXFIE)) == 0) {
			RK_I2S_WRITE_4(sc, I2S_XFER, 0);

			if (pcm_dir == PCMDIR_PLAY)
				clear_bit = I2S_CLR_TXC;
			else if (pcm_dir == PCMDIR_REC)
				clear_bit = I2S_CLR_RXC;
			else
				return (EINVAL);

			val = RK_I2S_READ_4(sc, I2S_CLR);
			val |= clear_bit;
			RK_I2S_WRITE_4(sc, I2S_CLR, val);

			/* Hardware clears the bit when the FIFO is flushed. */
			while ((RK_I2S_READ_4(sc, I2S_CLR) & clear_bit) != 0)
				DELAY(1);
		}

		RK_I2S_LOCK(sc);
		if (pcm_dir == PCMDIR_PLAY)
			sc->play_ptr = 0;
		else
			sc->rec_ptr = 0;
		RK_I2S_UNLOCK(sc);
		break;
	}

	return (0);
}

/* Return the driver-maintained buffer position for the given direction. */
static uint32_t
rk_i2s_dai_get_ptr(device_t dev, int pcm_dir)
{
	struct rk_i2s_softc *sc;
	uint32_t ptr;

	sc = device_get_softc(dev);

	RK_I2S_LOCK(sc);
	if (pcm_dir == PCMDIR_PLAY)
		ptr = sc->play_ptr;
	else
		ptr = sc->rec_ptr;
	RK_I2S_UNLOCK(sc);

	return ptr;
}

/* Hook up the audio framework's interrupt handler to our IRQ resource. */
static int
rk_i2s_dai_setup_intr(device_t dev, driver_intr_t intr_handler,
    void *intr_arg)
{
	struct rk_i2s_softc 	*sc = device_get_softc(dev);

	if (bus_setup_intr(dev, sc->res[1],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, intr_handler, intr_arg,
	    &sc->intrhand)) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}

	return (0);
}
/* Channel-format hook: nothing to program on this controller. */
static uint32_t
rk_i2s_dai_set_chanformat(device_t dev, uint32_t format)
{

	return (0);
}

/*
 * Reprogram the controller's source clock (sc->clk) to 'rate'.
 * The clock is stopped around the frequency change and re-enabled
 * afterwards.
 * NOTE(review): a clk_set_freq() failure is only logged; the function
 * still re-enables the clock and returns 0 — confirm this best-effort
 * behavior is intended.
 */
static int
rk_i2s_dai_set_sysclk(device_t dev, unsigned int rate, int dai_dir)
{
	struct rk_i2s_softc *sc;
	int error;

	sc = device_get_softc(dev);
	error = clk_disable(sc->clk);
	if (error != 0) {
		device_printf(sc->dev, "could not disable i2s_clk clock\n");
		return (error);
	}

	error = clk_set_freq(sc->clk, rate, CLK_SET_ROUND_DOWN);
	if (error != 0)
		device_printf(sc->dev, "could not set i2s_clk freq\n");

	error = clk_enable(sc->clk);
	if (error != 0) {
		device_printf(sc->dev, "could not enable i2s_clk clock\n");
		return (error);
	}

	return (0);
}

/*
 * Derive and program the bit-clock (MDIV) and frame-clock (TSD/RSD)
 * dividers for the requested sample rate, assuming a 2-channel, 32-bit
 * frame (bus clock = 2 * 32 * speed).  Only done when the controller is
 * clock master (I2S_CKR_MSS_SLAVE clear).  Returns the speed actually
 * requested.
 * NOTE(review): on clk_get_freq() failure this returns the errno value
 * through a uint32_t "speed" return — callers cannot distinguish it
 * from a valid rate; confirm against the audio_dai method contract.
 */
static uint32_t
rk_i2s_dai_set_chanspeed(device_t dev, uint32_t speed)
{
	struct rk_i2s_softc *sc;
	int error;
	uint32_t val;
	uint32_t bus_clock_div, lr_clock_div;
	uint64_t bus_clk_freq;
	uint64_t clk_freq;

	sc = device_get_softc(dev);

	/* Set format */
	val = RK_I2S_READ_4(sc, I2S_CKR);

	if ((val & I2S_CKR_MSS_SLAVE) == 0) {
		error = clk_get_freq(sc->clk, &clk_freq);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to get clk frequency: err=%d\n", error);
			return (error);
		}

		/* 2 channels of 32 bits per frame. */
		bus_clk_freq = 2 * 32 * speed;
		bus_clock_div = DIV_ROUND_CLOSEST(clk_freq, bus_clk_freq);
		lr_clock_div = bus_clk_freq / speed;

		val &= ~(I2S_CKR_MDIV_MASK | I2S_CKR_RSD_MASK |
		    I2S_CKR_TSD_MASK);
		val |= I2S_CKR_MDIV(bus_clock_div);
		val |= I2S_CKR_RSD(lr_clock_div);
		val |= I2S_CKR_TSD(lr_clock_div);

		RK_I2S_WRITE_4(sc, I2S_CKR, val);
	}

	return (speed);
}

static device_method_t rk_i2s_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rk_i2s_probe),
	DEVMETHOD(device_attach,	rk_i2s_attach),
	DEVMETHOD(device_detach,	rk_i2s_detach),

	DEVMETHOD(audio_dai_init,	rk_i2s_dai_init),
	DEVMETHOD(audio_dai_setup_intr,	rk_i2s_dai_setup_intr),
	DEVMETHOD(audio_dai_set_sysclk,	rk_i2s_dai_set_sysclk),
	DEVMETHOD(audio_dai_set_chanspeed,	rk_i2s_dai_set_chanspeed),
	DEVMETHOD(audio_dai_set_chanformat,	rk_i2s_dai_set_chanformat),
	DEVMETHOD(audio_dai_intr,	rk_i2s_dai_intr),
	DEVMETHOD(audio_dai_get_caps,
rk_i2s_dai_get_caps), DEVMETHOD(audio_dai_trigger, rk_i2s_dai_trigger), DEVMETHOD(audio_dai_get_ptr, rk_i2s_dai_get_ptr), DEVMETHOD_END }; static driver_t rk_i2s_driver = { "i2s", rk_i2s_methods, sizeof(struct rk_i2s_softc), }; DRIVER_MODULE(rk_i2s, simplebus, rk_i2s_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm64/rockchip/rk_pcie.c b/sys/arm64/rockchip/rk_pcie.c index 5b3b23e24913..cccb04ffdcd9 100644 --- a/sys/arm64/rockchip/rk_pcie.c +++ b/sys/arm64/rockchip/rk_pcie.c @@ -1,1432 +1,1432 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ /* Rockchip PCIe controller driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define ATU_CFG_BUS(x) (((x) & 0x0ff) << 20) #define ATU_CFG_SLOT(x) (((x) & 0x01f) << 15) #define ATU_CFG_FUNC(x) (((x) & 0x007) << 12) #define ATU_CFG_REG(x) (((x) & 0xfff) << 0) #define ATU_TYPE_MEM 0x2 #define ATU_TYPE_IO 0x6 #define ATU_TYPE_CFG0 0xA #define ATU_TYPE_CFG1 0xB #define ATY_TYPE_NOR_MSG 0xC #define ATU_OB_REGIONS 33 #define ATU_OB_REGION_SHIFT 20 #define ATU_OB_REGION_SIZE (1 << ATU_OB_REGION_SHIFT) #define ATU_OB_REGION_0_SIZE (( ATU_OB_REGIONS - 1) * ATU_OB_REGION_SIZE) #define ATU_IB_REGIONS 3 #define PCIE_CLIENT_BASIC_STRAP_CONF 0x000000 #define STRAP_CONF_GEN_2 (1 << 7) #define STRAP_CONF_MODE_RC (1 << 6) #define STRAP_CONF_LANES(n) ((((n) / 2) & 0x3) << 4) #define STRAP_CONF_ARI_EN (1 << 3) #define STRAP_CONF_SR_IOV_EN (1 << 2) #define STRAP_CONF_LINK_TRAIN_EN (1 << 1) #define STRAP_CONF_CONF_EN (1 << 0) #define PCIE_CLIENT_HOT_RESET_CTRL 0x000018 #define HOT_RESET_CTRL_LINK_DOWN_RESET (1 << 1) #define HOT_RESET_CTRL_HOT_RESET_IN (1 << 0) #define PCIE_CLIENT_BASIC_STATUS0 0x000044 #define PCIE_CLIENT_BASIC_STATUS1 0x000048 #define STATUS1_LINK_ST_GET(x) (((x) >> 20) & 0x3) #define STATUS1_LINK_ST_UP 3 #define PCIE_CLIENT_INT_MASK 0x00004C #define PCIE_CLIENT_INT_STATUS 0x000050 #define PCIE_CLIENT_INT_LEGACY_DONE (1 << 15) #define PCIE_CLIENT_INT_MSG (1 << 14) #define PCIE_CLIENT_INT_HOT_RST (1 << 13) #define PCIE_CLIENT_INT_DPA (1 << 12) #define PCIE_CLIENT_INT_FATAL_ERR (1 << 11) #define PCIE_CLIENT_INT_NFATAL_ERR (1 << 10) #define PCIE_CLIENT_INT_CORR_ERR (1 << 9) #define PCIE_CLIENT_INT_INTD (1 << 8) #define PCIE_CLIENT_INT_INTC (1 << 7) #define PCIE_CLIENT_INT_INTB (1 << 6) #define PCIE_CLIENT_INT_INTA (1 << 5) #define 
PCIE_CLIENT_INT_LOCAL (1 << 4) #define PCIE_CLIENT_INT_UDMA (1 << 3) #define PCIE_CLIENT_INT_PHY (1 << 2) #define PCIE_CLIENT_INT_HOT_PLUG (1 << 1) #define PCIE_CLIENT_INT_PWR_STCG (1 << 0) #define PCIE_CLIENT_INT_LEGACY (PCIE_CLIENT_INT_INTA | \ PCIE_CLIENT_INT_INTB | \ PCIE_CLIENT_INT_INTC | \ PCIE_CLIENT_INT_INTD) #define PCIE_CORE_CTRL0 0x900000 #define CORE_CTRL_LANES_GET(x) (((x) >> 20) & 0x3) #define PCIE_CORE_CTRL1 0x900004 #define PCIE_CORE_CONFIG_VENDOR 0x900044 #define PCIE_CORE_INT_STATUS 0x90020c #define PCIE_CORE_INT_PRFPE (1 << 0) #define PCIE_CORE_INT_CRFPE (1 << 1) #define PCIE_CORE_INT_RRPE (1 << 2) #define PCIE_CORE_INT_PRFO (1 << 3) #define PCIE_CORE_INT_CRFO (1 << 4) #define PCIE_CORE_INT_RT (1 << 5) #define PCIE_CORE_INT_RTR (1 << 6) #define PCIE_CORE_INT_PE (1 << 7) #define PCIE_CORE_INT_MTR (1 << 8) #define PCIE_CORE_INT_UCR (1 << 9) #define PCIE_CORE_INT_FCE (1 << 10) #define PCIE_CORE_INT_CT (1 << 11) #define PCIE_CORE_INT_UTC (1 << 18) #define PCIE_CORE_INT_MMVC (1 << 19) #define PCIE_CORE_INT_MASK 0x900210 #define PCIE_CORE_PHY_FUNC_CONF 0x9002C0 #define PCIE_CORE_RC_BAR_CONF 0x900300 #define PCIE_RC_CONFIG_STD_BASE 0x800000 #define PCIE_RC_CONFIG_PRIV_BASE 0xA00000 #define PCIE_RC_CONFIG_DCSR 0xA000C8 #define PCIE_RC_CONFIG_DCSR_MPS_MASK (0x7 << 5) #define PCIE_RC_CONFIG_DCSR_MPS_128 (0 << 5) #define PCIE_RC_CONFIG_DCSR_MPS_256 (1 << 5) #define PCIE_RC_CONFIG_LINK_CAP 0xA00CC #define PCIE_RC_CONFIG_LINK_CAP_L0S (1 << 10) #define PCIE_RC_CONFIG_LCS 0xA000D0 #define PCIE_RC_CONFIG_THP_CAP 0xA00274 #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK 0xFFF00000 #define PCIE_CORE_OB_ADDR0(n) (0xC00000 + 0x20 * (n) + 0x00) #define PCIE_CORE_OB_ADDR1(n) (0xC00000 + 0x20 * (n) + 0x04) #define PCIE_CORE_OB_DESC0(n) (0xC00000 + 0x20 * (n) + 0x08) #define PCIE_CORE_OB_DESC1(n) (0xC00000 + 0x20 * (n) + 0x0C) #define PCIE_CORE_OB_DESC2(n) (0xC00000 + 0x20 * (n) + 0x10) #define PCIE_CORE_OB_DESC3(n) (0xC00000 + 0x20 * (n) + 0x14) #define PCIE_CORE_IB_ADDR0(n) 
(0xC00800 + 0x8 * (n) + 0x00) #define PCIE_CORE_IB_ADDR1(n) (0xC00800 + 0x8 * (n) + 0x04) #define PRIV_CFG_RD4(sc, reg) \ (uint32_t)rk_pcie_local_cfg_read(sc, true, reg, 4) #define PRIV_CFG_RD2(sc, reg) \ (uint16_t)rk_pcie_local_cfg_read(sc, true, reg, 2) #define PRIV_CFG_RD1(sc, reg) \ (uint8_t)rk_pcie_local_cfg_read(sc, true, reg, 1) #define PRIV_CFG_WR4(sc, reg, val) \ rk_pcie_local_cfg_write(sc, true, reg, val, 4) #define PRIV_CFG_WR2(sc, reg, val) \ rk_pcie_local_cfg_write(sc, true, reg, val, 2) #define PRIV_CFG_WR1(sc, reg, val) \ rk_pcie_local_cfg_write(sc, true, reg, val, 1) #define APB_WR4(_sc, _r, _v) bus_write_4((_sc)->apb_mem_res, (_r), (_v)) #define APB_RD4(_sc, _r) bus_read_4((_sc)->apb_mem_res, (_r)) #define MAX_LANES 4 #define RK_PCIE_ENABLE_MSI #define RK_PCIE_ENABLE_MSIX struct rk_pcie_softc { struct ofw_pci_softc ofw_pci; /* Must be first */ struct resource *axi_mem_res; struct resource *apb_mem_res; struct resource *client_irq_res; struct resource *legacy_irq_res; struct resource *sys_irq_res; void *client_irq_cookie; void *legacy_irq_cookie; void *sys_irq_cookie; device_t dev; phandle_t node; struct mtx mtx; struct ofw_pci_range mem_range; struct ofw_pci_range pref_mem_range; struct ofw_pci_range io_range; bool coherent; bus_dma_tag_t dmat; int num_lanes; bool link_is_gen2; bool no_l0s; u_int bus_start; u_int bus_end; u_int root_bus; u_int sub_bus; regulator_t supply_12v; regulator_t supply_3v3; regulator_t supply_1v8; regulator_t supply_0v9; hwreset_t hwreset_core; hwreset_t hwreset_mgmt; hwreset_t hwreset_mgmt_sticky; hwreset_t hwreset_pipe; hwreset_t hwreset_pm; hwreset_t hwreset_aclk; hwreset_t hwreset_pclk; clk_t clk_aclk; clk_t clk_aclk_perf; clk_t clk_hclk; clk_t clk_pm; phy_t phys[MAX_LANES]; gpio_pin_t gpio_ep; }; /* Compatible devices. 
*/ static struct ofw_compat_data compat_data[] = { {"rockchip,rk3399-pcie", 1}, {NULL, 0}, }; static uint32_t rk_pcie_local_cfg_read(struct rk_pcie_softc *sc, bool priv, u_int reg, int bytes) { uint32_t val; bus_addr_t base; if (priv) base = PCIE_RC_CONFIG_PRIV_BASE; else base = PCIE_RC_CONFIG_STD_BASE; switch (bytes) { case 4: val = bus_read_4(sc->apb_mem_res, base + reg); break; case 2: val = bus_read_2(sc->apb_mem_res, base + reg); break; case 1: val = bus_read_1(sc->apb_mem_res, base + reg); break; default: val = 0xFFFFFFFF; } return (val); } static void rk_pcie_local_cfg_write(struct rk_pcie_softc *sc, bool priv, u_int reg, uint32_t val, int bytes) { uint32_t val2; bus_addr_t base; if (priv) base = PCIE_RC_CONFIG_PRIV_BASE; else base = PCIE_RC_CONFIG_STD_BASE; switch (bytes) { case 4: bus_write_4(sc->apb_mem_res, base + reg, val); break; case 2: val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3)); val2 &= ~(0xffff << ((reg & 3) << 3)); val2 |= ((val & 0xffff) << ((reg & 3) << 3)); bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2); break; case 1: val2 = bus_read_4(sc->apb_mem_res, base + (reg & ~3)); val2 &= ~(0xff << ((reg & 3) << 3)); val2 |= ((val & 0xff) << ((reg & 3) << 3)); bus_write_4(sc->apb_mem_res, base + (reg & ~3), val2); break; } } static bool rk_pcie_check_dev(struct rk_pcie_softc *sc, u_int bus, u_int slot, u_int func, u_int reg) { uint32_t val; if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return (false); if (bus == sc->root_bus) { /* we have only 1 device with 1 function root port */ if (slot > 0 || func > 0) return (false); return (true); } /* link is needed for accessing non-root busses */ val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1); if (STATUS1_LINK_ST_GET(val) != STATUS1_LINK_ST_UP) return (false); /* only one device can be on first subordinate bus */ if (bus == sc->sub_bus && slot != 0 ) return (false); return (true); } static void rk_pcie_map_out_atu(struct 
rk_pcie_softc *sc, int idx, int type, int num_bits, uint64_t pa) { uint32_t addr0; uint64_t max_size __diagused; /* Check HW constrains */ max_size = idx == 0 ? ATU_OB_REGION_0_SIZE: ATU_OB_REGION_SIZE; KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx)); KASSERT(num_bits >= 7 && num_bits <= 63, ("Bit width of region is invalid: %d\n", num_bits)); KASSERT(max_size <= (1ULL << (num_bits + 1)), ("Bit width is invalid for given region[%d]: %d\n", idx, num_bits)); addr0 = (uint32_t)pa & 0xFFFFFF00; addr0 |= num_bits; APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), addr0); APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), (uint32_t)(pa >> 32)); APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type); APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus); /* Readback for sync */ APB_RD4(sc, PCIE_CORE_OB_DESC1(idx)); } static void rk_pcie_map_cfg_atu(struct rk_pcie_softc *sc, int idx, int type) { /* Check HW constrains */ KASSERT(idx < ATU_OB_REGIONS, ("Invalid region index: %d\n", idx)); /* * Config window is only 25 bits width, so we cannot encode full bus * range into it. Remaining bits of bus number should be taken from * DESC1 field. 
 */
	APB_WR4(sc, PCIE_CORE_OB_ADDR0(idx), 25 - 1);
	APB_WR4(sc, PCIE_CORE_OB_ADDR1(idx), 0);
	APB_WR4(sc, PCIE_CORE_OB_DESC0(idx), 1 << 23 | type);
	APB_WR4(sc, PCIE_CORE_OB_DESC1(idx), sc->root_bus);
	/* Readback for sync */
	APB_RD4(sc, PCIE_CORE_OB_DESC1(idx));
}

/*
 * Program inbound ATU region 'idx' to translate a (num_bits+1)-bit
 * window at CPU physical address 'pa' 1:1 into PCI space.  The low byte
 * of ADDR0 encodes the window size; ADDR1 carries the upper 32 address
 * bits.
 */
static void
rk_pcie_map_in_atu(struct rk_pcie_softc *sc, int idx, int num_bits,
    uint64_t pa)
{
	uint32_t addr0;

	/* Check HW constrains */
	KASSERT(idx < ATU_IB_REGIONS, ("Invalid region index: %d\n", idx));
	KASSERT(num_bits >= 7 && num_bits <= 63,
	    ("Bit width of region is invalid: %d\n", num_bits));

	addr0 = (uint32_t)pa & 0xFFFFFF00;
	addr0 |= num_bits;
	APB_WR4(sc, PCIE_CORE_IB_ADDR0(idx), addr0);
	APB_WR4(sc, PCIE_CORE_IB_ADDR1(idx), (uint32_t)(pa >> 32));

	/* Readback for sync */
	APB_RD4(sc, PCIE_CORE_IB_ADDR1(idx));
}

/*
 * Sort the FDT "ranges" entries into the softc's io_range,
 * pref_mem_range and mem_range slots, rejecting duplicates.
 * A memory range is mandatory; IO and prefetchable ranges are optional.
 * Returns 0 or ENXIO on a malformed device tree.
 */
static int
rk_pcie_decode_ranges(struct rk_pcie_softc *sc, struct ofw_pci_range *ranges,
    int nranges)
{
	int i;

	for (i = 0; i < nranges; i++) {
		switch(ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) {
		case OFW_PCI_PHYS_HI_SPACE_IO:
			if (sc->io_range.size != 0) {
				device_printf(sc->dev,
				    "Duplicated IO range found in DT\n");
				return (ENXIO);
			}
			sc->io_range = ranges[i];
			break;
		case OFW_PCI_PHYS_HI_SPACE_MEM32:
		case OFW_PCI_PHYS_HI_SPACE_MEM64:
			/* Prefetchable and normal memory are kept apart. */
			if (ranges[i].pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) {
				if (sc->pref_mem_range.size != 0) {
					device_printf(sc->dev,
					    "Duplicated memory range found "
					    "in DT\n");
					return (ENXIO);
				}
				sc->pref_mem_range = ranges[i];
			} else {
				if (sc->mem_range.size != 0) {
					device_printf(sc->dev,
					    "Duplicated memory range found "
					    "in DT\n");
					return (ENXIO);
				}
				sc->mem_range = ranges[i];
			}
		}
	}
	if (sc->mem_range.size == 0) {
		device_printf(sc->dev,
		    " At least memory range should be defined in DT.\n");
		return (ENXIO);
	}
	return (0);
}

/*-----------------------------------------------------------------------------
 *
 *  P C I B   I N T E R F A C E
 */

/*
 * pcib config-space read.  Root-bus accesses go through the local
 * (APB-mapped) config space; all other busses go through outbound ATU
 * region 0, retargeted here to CFG0 (first subordinate bus) or CFG1
 * (busses behind it), and are read with bus_peek_* so a target abort
 * yields 0xFFFFFFFF instead of a panic.
 */
static uint32_t
rk_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct rk_pcie_softc *sc;
	uint32_t d32, data;
	uint16_t d16;
	uint8_t d8;
	uint64_t addr;
	int type, ret;

	sc = device_get_softc(dev);
	if (!rk_pcie_check_dev(sc, bus, slot, func, reg))
		return (0xFFFFFFFFU);

	if (bus == sc->root_bus)
		return (rk_pcie_local_cfg_read(sc, false, reg, bytes));

	/* Encode BDF+register into the ATU config window offset. */
	addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) |
	    ATU_CFG_REG(reg);
	type = bus == sc->sub_bus ? ATU_TYPE_CFG0: ATU_TYPE_CFG1;
	rk_pcie_map_cfg_atu(sc, 0, type);

	ret = -1;
	switch (bytes) {
	case 1:
		ret = bus_peek_1(sc->axi_mem_res, addr, &d8);
		data = d8;
		break;
	case 2:
		ret = bus_peek_2(sc->axi_mem_res, addr, &d16);
		data = d16;
		break;
	case 4:
		ret = bus_peek_4(sc->axi_mem_res, addr, &d32);
		data = d32;
		break;
	}
	if (ret != 0)
		data = 0xFFFFFFFF;
	return (data);
}

/*
 * pcib config-space write; mirror of rk_pcie_read_config() using
 * bus_poke_* so failed writes are silently absorbed.
 */
static void
rk_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct rk_pcie_softc *sc;
	uint64_t addr;
	int type;

	sc = device_get_softc(dev);
	if (!rk_pcie_check_dev(sc, bus, slot, func, reg))
		return;

	if (bus == sc->root_bus)
		return (rk_pcie_local_cfg_write(sc, false, reg, val, bytes));

	/* Encode BDF+register into the ATU config window offset. */
	addr = ATU_CFG_BUS(bus) | ATU_CFG_SLOT(slot) | ATU_CFG_FUNC(func) |
	    ATU_CFG_REG(reg);
	type = bus == sc->sub_bus ?
ATU_TYPE_CFG0: ATU_TYPE_CFG1; rk_pcie_map_cfg_atu(sc, 0, type); switch (bytes) { case 1: bus_poke_1(sc->axi_mem_res, addr, (uint8_t)val); break; case 2: bus_poke_2(sc->axi_mem_res, addr, (uint16_t)val); break; case 4: bus_poke_4(sc->axi_mem_res, addr, val); break; default: break; } } #ifdef RK_PCIE_ENABLE_MSI static int rk_pcie_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { phandle_t msi_parent; int rv; rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); if (rv != 0) return (rv); rv = intr_alloc_msi(pci, child, msi_parent, count, maxcount,irqs); return (rv); } static int rk_pcie_release_msi(device_t pci, device_t child, int count, int *irqs) { phandle_t msi_parent; int rv; rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); if (rv != 0) return (rv); rv = intr_release_msi(pci, child, msi_parent, count, irqs); return (rv); } #endif static int rk_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { phandle_t msi_parent; int rv; rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); if (rv != 0) return (rv); rv = intr_map_msi(pci, child, msi_parent, irq, addr, data); return (rv); } #ifdef RK_PCIE_ENABLE_MSIX static int rk_pcie_alloc_msix(device_t pci, device_t child, int *irq) { phandle_t msi_parent; int rv; rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); if (rv != 0) return (rv); rv = intr_alloc_msix(pci, child, msi_parent, irq); return (rv); } static int rk_pcie_release_msix(device_t pci, device_t child, int irq) { phandle_t msi_parent; int rv; rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); if (rv != 0) return (rv); rv = intr_release_msix(pci, child, msi_parent, irq); return (rv); } #endif static int rk_pcie_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { phandle_t node; int rv; uint32_t rid; uint16_t pci_rid; if 
(type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); node = ofw_bus_get_node(pci); pci_rid = pci_get_rid(child); rv = ofw_bus_msimap(node, pci_rid, NULL, &rid); if (rv != 0) return (rv); *id = rid; return (0); } static int rk_pcie_route_interrupt(device_t bus, device_t dev, int pin) { struct rk_pcie_softc *sc; u_int irq; sc = device_get_softc(bus); irq = intr_map_clone_irq(rman_get_start(sc->legacy_irq_res)); device_printf(bus, "route pin %d for device %d.%d to %u\n", pin, pci_get_slot(dev), pci_get_function(dev), irq); return (irq); } /*----------------------------------------------------------------------------- * * B U S / D E V I C E I N T E R F A C E */ static int rk_pcie_parse_fdt_resources(struct rk_pcie_softc *sc) { int i, rv; char buf[16]; /* Regulators. All are optional. */ rv = regulator_get_by_ofw_property(sc->dev, 0, "vpcie12v-supply", &sc->supply_12v); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev,"Cannot get 'vpcie12' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vpcie3v3-supply", &sc->supply_3v3); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev,"Cannot get 'vpcie3v3' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vpcie1v8-supply", &sc->supply_1v8); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev,"Cannot get 'vpcie1v8' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vpcie0v9-supply", &sc->supply_0v9); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev,"Cannot get 'vpcie0v9' regulator\n"); return (ENXIO); } /* Resets. 
*/ rv = hwreset_get_by_ofw_name(sc->dev, 0, "core", &sc->hwreset_core); if (rv != 0) { device_printf(sc->dev, "Cannot get 'core' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt", &sc->hwreset_mgmt); if (rv != 0) { device_printf(sc->dev, "Cannot get 'mgmt' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "mgmt-sticky", &sc->hwreset_mgmt_sticky); if (rv != 0) { device_printf(sc->dev, "Cannot get 'mgmt-sticky' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "pipe", &sc->hwreset_pipe); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pipe' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "pm", &sc->hwreset_pm); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pm' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "aclk", &sc->hwreset_aclk); if (rv != 0) { device_printf(sc->dev, "Cannot get 'aclk' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "pclk", &sc->hwreset_pclk); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pclk' reset\n"); return (ENXIO); } /* Clocks. */ rv = clk_get_by_ofw_name(sc->dev, 0, "aclk", &sc->clk_aclk); if (rv != 0) { device_printf(sc->dev, "Cannot get 'aclk' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "aclk-perf", &sc->clk_aclk_perf); if (rv != 0) { device_printf(sc->dev, "Cannot get 'aclk-perf' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "hclk", &sc->clk_hclk); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hclk' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "pm", &sc->clk_pm); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pm' clock\n"); return (ENXIO); } /* Phys. */ for (i = 0; i < MAX_LANES; i++ ) { sprintf (buf, "pcie-phy-%d", i); rv = phy_get_by_ofw_name(sc->dev, 0, buf, sc->phys + i); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' phy\n", buf); return (ENXIO); } } /* GPIO for PERST#. 
Optional */ rv = gpio_pin_get_by_ofw_property(sc->dev, sc->node, "ep-gpios", &sc->gpio_ep); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev, "Cannot get 'ep-gpios' gpio\n"); return (ENXIO); } return (0); } static int rk_pcie_enable_resources(struct rk_pcie_softc *sc) { int i, rv; uint32_t val; /* Assert all resets */ rv = hwreset_assert(sc->hwreset_pclk); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'pclk' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_aclk); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'aclk' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_pm); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'pm' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_pipe); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'pipe' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_mgmt_sticky); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'mgmt_sticky' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_mgmt); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'hmgmt' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_core); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'hcore' reset\n"); return (rv); } DELAY(10000); /* Enable clockls */ rv = clk_enable(sc->clk_aclk); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'aclk' clock\n"); return (rv); } rv = clk_enable(sc->clk_aclk_perf); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'aclk_perf' clock\n"); return (rv); } rv = clk_enable(sc->clk_hclk); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hclk' clock\n"); return (rv); } rv = clk_enable(sc->clk_pm); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pm' clock\n"); return (rv); } /* Power up regulators */ if (sc->supply_12v != NULL) { rv = regulator_enable(sc->supply_12v); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vpcie12' regulator\n"); return (rv); } } if (sc->supply_3v3 != NULL) { rv = regulator_enable(sc->supply_3v3); if (rv != 0) { 
device_printf(sc->dev, "Cannot enable 'vpcie3v3' regulator\n"); return (rv); } } if (sc->supply_1v8 != NULL) { rv = regulator_enable(sc->supply_1v8); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vpcie1v8' regulator\n"); return (rv); } } if (sc->supply_0v9 != NULL) { rv = regulator_enable(sc->supply_0v9); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vpcie1v8' regulator\n"); return (rv); } } DELAY(1000); /* Deassert basic resets*/ rv = hwreset_deassert(sc->hwreset_pm); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'pm' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_aclk); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'aclk' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_pclk); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'pclk' reset\n"); return (rv); } /* Set basic PCIe core mode (RC, lanes, gen1 or 2) */ val = STRAP_CONF_GEN_2 << 16 | (sc->link_is_gen2 ? STRAP_CONF_GEN_2: 0); val |= STRAP_CONF_MODE_RC << 16 | STRAP_CONF_MODE_RC; val |= STRAP_CONF_LANES(~0) << 16 | STRAP_CONF_LANES(sc->num_lanes); val |= STRAP_CONF_ARI_EN << 16 | STRAP_CONF_ARI_EN; val |= STRAP_CONF_CONF_EN << 16 | STRAP_CONF_CONF_EN; APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, val); for (i = 0; i < MAX_LANES; i++) { rv = phy_enable(sc->phys[i]); if (rv != 0) { device_printf(sc->dev, "Cannot enable phy %d\n", i); return (rv); } } /* Deassert rest of resets - order is important ! 
*/ rv = hwreset_deassert(sc->hwreset_mgmt_sticky); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'mgmt_sticky' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_core); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'core' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_mgmt); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'mgmt' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_pipe); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'pipe' reset\n"); return (rv); } return (0); } static int rk_pcie_setup_hw(struct rk_pcie_softc *sc) { uint32_t val; int i, rv; /* Assert PERST# if defined */ if (sc->gpio_ep != NULL) { rv = gpio_pin_set_active(sc->gpio_ep, 0); if (rv != 0) { device_printf(sc->dev, "Cannot clear 'gpio-ep' gpio\n"); return (rv); } } rv = rk_pcie_enable_resources(sc); if (rv != 0) return(rv); /* Fix wrong default value for transmited FTS for L0s exit */ val = APB_RD4(sc, PCIE_CORE_CTRL1); val |= 0xFFFF << 8; APB_WR4(sc, PCIE_CORE_CTRL1, val); /* Setup PCIE Link Status & Control register */ val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); val |= PCIEM_LINK_CTL_COMMON_CLOCK; APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); val |= PCIEM_LINK_CTL_RCB; APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); /* Enable training for GEN1 */ APB_WR4(sc, PCIE_CLIENT_BASIC_STRAP_CONF, STRAP_CONF_LINK_TRAIN_EN << 16 | STRAP_CONF_LINK_TRAIN_EN); /* Deassert PERST# if defined */ if (sc->gpio_ep != NULL) { rv = gpio_pin_set_active(sc->gpio_ep, 1); if (rv != 0) { device_printf(sc->dev, "Cannot set 'gpio-ep' gpio\n"); return (rv); } } /* Wait for link */ for (i = 500; i > 0; i--) { val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1); if (STATUS1_LINK_ST_GET(val) == STATUS1_LINK_ST_UP) break; DELAY(1000); } if (i <= 0) { device_printf(sc->dev, "Gen1 link training timeouted: 0x%08X.\n", val); return (0); } if (sc->link_is_gen2) { val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); val |= PCIEM_LINK_CTL_RETRAIN_LINK; APB_WR4(sc, 
	    PCIE_RC_CONFIG_LCS, val);

		/* Wait for link */
		for (i = 500; i > 0; i--) {
			val = APB_RD4(sc, PCIE_CLIENT_BASIC_STATUS1);
			if (STATUS1_LINK_ST_GET(val) == STATUS1_LINK_ST_UP)
				break;
			DELAY(1000);
		}
		if (i <= 0)
			device_printf(sc->dev, "Gen2 link training "
			    "timeouted: 0x%08X.\n", val);
	}

	/* Report the negotiated link width when booting verbose. */
	val = APB_RD4(sc, PCIE_CORE_CTRL0);
	val = CORE_CTRL_LANES_GET(val);
	if (bootverbose)
		device_printf(sc->dev, "Link width: %d\n", 1 << val);

	return (0);
}

/*
 * Software-visible setup of the root complex: bridge config header,
 * capability trimming, and inbound/outbound ATU programming covering
 * the memory and IO ranges decoded from the device tree.
 */
static int
rk_pcie_setup_sw(struct rk_pcie_softc *sc)
{
	uint32_t val;
	int i, region;

	pcib_bridge_init(sc->dev);

	/* Setup config registers */
	APB_WR4(sc, PCIE_CORE_CONFIG_VENDOR, 0x1D87); /* Rockchip vendor ID*/
	PRIV_CFG_WR1(sc, PCIR_CLASS, PCIC_BRIDGE);
	PRIV_CFG_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
	PRIV_CFG_WR1(sc, PCIR_PRIBUS_1, sc->root_bus);
	PRIV_CFG_WR1(sc, PCIR_SECBUS_1, sc->sub_bus);
	PRIV_CFG_WR1(sc, PCIR_SUBBUS_1, sc->bus_end);
	PRIV_CFG_WR2(sc, PCIR_COMMAND,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN);

	/* Don't advertise L1 power substate */
	val = APB_RD4(sc, PCIE_RC_CONFIG_THP_CAP);
	val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
	APB_WR4(sc, PCIE_RC_CONFIG_THP_CAP, val);

	/* Don't advertise L0s */
	if (sc->no_l0s) {
		/*
		 * NOTE(review): this looks wrong on two counts.
		 * PCIE_RC_CONFIG_THP_CAP_NEXT_MASK is the THP next-pointer
		 * mask, not the L0s bit, and PCIE_RC_CONFIG_LINK_CAP_L0S is
		 * defined as a bit mask (1 << 10) yet is used here as the
		 * APB register offset.  The intent is presumably:
		 *   val &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
		 *   APB_WR4(sc, PCIE_RC_CONFIG_LINK_CAP, val);
		 * Confirm against the RK3399 TRM before changing.
		 */
		val = APB_RD4(sc, PCIE_RC_CONFIG_LINK_CAP);
		val &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
		APB_WR4(sc, PCIE_RC_CONFIG_LINK_CAP_L0S, val);
	}

	/*Adjust maximum payload size*/
	val = APB_RD4(sc, PCIE_RC_CONFIG_DCSR);
	val &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
	val |= PCIE_RC_CONFIG_DCSR_MPS_128;
	APB_WR4(sc, PCIE_RC_CONFIG_DCSR, val);

	/*
	 * Prepare IB ATU
	 * map whole address range in 1:1 mappings
	 */
	rk_pcie_map_in_atu(sc, 2, 64 - 1, 0);

	/* Prepare OB ATU */
	/* - region 0 (32 MB) is used for config access */
	region = 0;
	rk_pcie_map_out_atu(sc, region++, ATU_TYPE_CFG0, 25 - 1, 0);

	/* - then map memory (by using 1MB regions */
	for (i = 0; i  < sc->mem_range.size / ATU_OB_REGION_SIZE; i++) {
		rk_pcie_map_out_atu(sc, region++, ATU_TYPE_MEM,
		    ATU_OB_REGION_SHIFT - 1,
		    sc->mem_range.pci + ATU_OB_REGION_SIZE * i);
	}

	/* - IO space is next, one region typically*/
	for (i = 0; i < sc->io_range.size / ATU_OB_REGION_SIZE; i++) {
		rk_pcie_map_out_atu(sc, region++, ATU_TYPE_IO,
		    ATU_OB_REGION_SHIFT - 1,
		    sc->io_range.pci + ATU_OB_REGION_SIZE * i);
	}
	APB_WR4(sc, PCIE_CORE_RC_BAR_CONF, 0);
	return (0);
}

/*
 * Filter for the "sys" IRQ: acknowledges and logs local core
 * interrupts (PCIE_CLIENT_INT_LOCAL -> PCIE_CORE_INT_STATUS).
 */
static int
rk_pcie_sys_irq(void *arg)
{
	struct rk_pcie_softc *sc;
	uint32_t irq;

	sc = (struct rk_pcie_softc *)arg;
	irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
	if (irq & PCIE_CLIENT_INT_LOCAL) {
		irq = APB_RD4(sc, PCIE_CORE_INT_STATUS);
		/* Write-1-to-clear, then clear the summary bit. */
		APB_WR4(sc, PCIE_CORE_INT_STATUS, irq);
		APB_WR4(sc, PCIE_CLIENT_INT_STATUS, PCIE_CLIENT_INT_LOCAL);

		device_printf(sc->dev, "'sys' interrupt received: 0x%04X\n",
		    irq);
	}

	return (FILTER_HANDLED);
}

/*
 * Filter for the "client" IRQ: acknowledges and logs everything not
 * owned by the sys or legacy handlers.
 */
static int
rk_pcie_client_irq(void *arg)
{
	struct rk_pcie_softc *sc;
	uint32_t irq;

	sc = (struct rk_pcie_softc *)arg;
	irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
	/* Clear causes handled by other interrupts */
	irq &= ~PCIE_CLIENT_INT_LOCAL;
	irq &= ~PCIE_CLIENT_INT_LEGACY;
	APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq);

	device_printf(sc->dev, "'client' interrupt received: 0x%04X\n", irq);

	return (FILTER_HANDLED);
}

/*
 * Filter for the "legacy" (INTx) IRQ: acknowledges the INTA..INTD bits
 * and returns FILTER_STRAY so the shared child handlers run.
 */
static int
rk_pcie_legacy_irq(void *arg)
{
	struct rk_pcie_softc *sc;
	uint32_t irq;

	sc = (struct rk_pcie_softc *)arg;
	irq = APB_RD4(sc, PCIE_CLIENT_INT_STATUS);
	irq &= PCIE_CLIENT_INT_LEGACY;
	APB_WR4(sc, PCIE_CLIENT_INT_STATUS, irq);

	/* all legacy interrupt are shared, do nothing */
	return (FILTER_STRAY);
}

/* Hand children the (possibly coherent) DMA tag built at attach. */
static bus_dma_tag_t
rk_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct rk_pcie_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

/* Match against the FDT compatible table. */
static int
rk_pcie_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Rockchip PCIe controller");
	return (BUS_PROBE_DEFAULT);
}

static int
rk_pcie_attach(device_t dev)
{
	struct resource_map_request req;
	struct resource_map map;
	struct rk_pcie_softc *sc;
	uint32_t val;
	int rv, rid, max_speed;

	sc = device_get_softc(dev);
sc->dev = dev; sc->node = ofw_bus_get_node(dev); mtx_init(&sc->mtx, "rk_pcie_mtx", NULL, MTX_DEF); /* XXX Should not be this configurable ? */ sc->bus_start = 0; sc->bus_end = 0x1F; sc->root_bus = sc->bus_start; sc->sub_bus = 1; /* Read FDT properties */ rv = rk_pcie_parse_fdt_resources(sc); if (rv != 0) goto out; sc->coherent = OF_hasprop(sc->node, "dma-coherent"); sc->no_l0s = OF_hasprop(sc->node, "aspm-no-l0s"); rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes, sizeof(sc->num_lanes)); if (rv != sizeof(sc->num_lanes)) sc->num_lanes = 1; if (sc->num_lanes != 1 && sc->num_lanes != 2 && sc->num_lanes != 4) { device_printf(dev, "invalid number of lanes: %d\n",sc->num_lanes); sc->num_lanes = 0; rv = ENXIO; goto out; } rv = OF_getencprop(sc->node, "max-link-speed", &max_speed, sizeof(max_speed)); if (rv != sizeof(max_speed) || max_speed != 1) sc->link_is_gen2 = true; else sc->link_is_gen2 = false; rv = ofw_bus_find_string_index(sc->node, "reg-names", "axi-base", &rid); if (rv != 0) { device_printf(dev, "Cannot get 'axi-base' memory\n"); rv = ENXIO; goto out; } sc->axi_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED); if (sc->axi_mem_res == NULL) { device_printf(dev, "Cannot allocate 'axi-base' (rid: %d)\n", rid); rv = ENXIO; goto out; } resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->axi_mem_res, &req, &map); if (rv != 0) { device_printf(dev, "Cannot map 'axi-base' (rid: %d)\n", rid); goto out; } rman_set_mapping(sc->axi_mem_res, &map); rv = ofw_bus_find_string_index(sc->node, "reg-names", "apb-base", &rid); if (rv != 0) { device_printf(dev, "Cannot get 'apb-base' memory\n"); rv = ENXIO; goto out; } sc->apb_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->apb_mem_res == NULL) { device_printf(dev, "Cannot allocate 'apb-base' (rid: %d)\n", rid); rv = ENXIO; goto out; } rv = ofw_bus_find_string_index(sc->node, "interrupt-names", 
"client", &rid); if (rv != 0) { device_printf(dev, "Cannot get 'client' IRQ\n"); rv = ENXIO; goto out; } sc->client_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->client_irq_res == NULL) { device_printf(dev, "Cannot allocate 'client' IRQ resource\n"); rv = ENXIO; goto out; } rv = ofw_bus_find_string_index(sc->node, "interrupt-names", "legacy", &rid); if (rv != 0) { device_printf(dev, "Cannot get 'legacy' IRQ\n"); rv = ENXIO; goto out; } sc->legacy_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->legacy_irq_res == NULL) { device_printf(dev, "Cannot allocate 'legacy' IRQ resource\n"); rv = ENXIO; goto out; } rv = ofw_bus_find_string_index(sc->node, "interrupt-names", "sys", &rid); if (rv != 0) { device_printf(dev, "Cannot get 'sys' IRQ\n"); rv = ENXIO; goto out; } sc->sys_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->sys_irq_res == NULL) { device_printf(dev, "Cannot allocate 'sys' IRQ resource\n"); rv = ENXIO; goto out; } if (bootverbose) device_printf(dev, "Bus is%s cache-coherent\n", sc->coherent ? "" : " not"); rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ sc->coherent ? 
BUS_DMA_COHERENT : 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (rv != 0) goto out; rv = ofw_pcib_init(dev); if (rv != 0) goto out; rv = rk_pcie_decode_ranges(sc, sc->ofw_pci.sc_range, sc->ofw_pci.sc_nrange); if (rv != 0) goto out_full; rv = rk_pcie_setup_hw(sc); if (rv != 0) goto out_full; rv = rk_pcie_setup_sw(sc); if (rv != 0) goto out_full; rv = bus_setup_intr(dev, sc->client_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, rk_pcie_client_irq, NULL, sc, &sc->client_irq_cookie); if (rv != 0) { device_printf(dev, "cannot setup client interrupt handler\n"); rv = ENXIO; goto out_full; } rv = bus_setup_intr(dev, sc->legacy_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, rk_pcie_legacy_irq, NULL, sc, &sc->legacy_irq_cookie); if (rv != 0) { device_printf(dev, "cannot setup client interrupt handler\n"); rv = ENXIO; goto out_full; } rv = bus_setup_intr(dev, sc->sys_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, rk_pcie_sys_irq, NULL, sc, &sc->sys_irq_cookie); if (rv != 0) { device_printf(dev, "cannot setup client interrupt handler\n"); rv = ENXIO; goto out_full; } /* Enable interrupts */ val = PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD | PCIE_CLIENT_INT_PHY; APB_WR4(sc, PCIE_CLIENT_INT_MASK, (val << 16) & ~val); val = PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | PCIE_CORE_INT_MMVC; APB_WR4(sc, PCIE_CORE_INT_MASK, ~(val)); val = APB_RD4(sc, PCIE_RC_CONFIG_LCS); val |= PCIEM_LINK_CTL_LBMIE | PCIEM_LINK_CTL_LABIE; APB_WR4(sc, PCIE_RC_CONFIG_LCS, val); DELAY(250000); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); out_full: 
bus_teardown_intr(dev, sc->sys_irq_res, sc->sys_irq_cookie); bus_teardown_intr(dev, sc->legacy_irq_res, sc->legacy_irq_cookie); bus_teardown_intr(dev, sc->client_irq_res, sc->client_irq_cookie); ofw_pcib_fini(dev); out: bus_dma_tag_destroy(sc->dmat); bus_free_resource(dev, SYS_RES_IRQ, sc->sys_irq_res); bus_free_resource(dev, SYS_RES_IRQ, sc->legacy_irq_res); bus_free_resource(dev, SYS_RES_IRQ, sc->client_irq_res); bus_free_resource(dev, SYS_RES_MEMORY, sc->apb_mem_res); bus_free_resource(dev, SYS_RES_MEMORY, sc->axi_mem_res); /* GPIO */ gpio_pin_release(sc->gpio_ep); /* Phys */ for (int i = 0; i < MAX_LANES; i++) { phy_release(sc->phys[i]); } /* Clocks */ clk_release(sc->clk_aclk); clk_release(sc->clk_aclk_perf); clk_release(sc->clk_hclk); clk_release(sc->clk_pm); /* Resets */ hwreset_release(sc->hwreset_core); hwreset_release(sc->hwreset_mgmt); hwreset_release(sc->hwreset_pipe); hwreset_release(sc->hwreset_pm); hwreset_release(sc->hwreset_aclk); hwreset_release(sc->hwreset_pclk); /* Regulators */ regulator_release(sc->supply_12v); regulator_release(sc->supply_3v3); regulator_release(sc->supply_1v8); regulator_release(sc->supply_0v9); return (rv); } static device_method_t rk_pcie_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_pcie_probe), DEVMETHOD(device_attach, rk_pcie_attach), /* Bus interface */ DEVMETHOD(bus_get_dma_tag, rk_pcie_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_read_config, rk_pcie_read_config), DEVMETHOD(pcib_write_config, rk_pcie_write_config), DEVMETHOD(pcib_route_interrupt, rk_pcie_route_interrupt), #ifdef RK_PCIE_ENABLE_MSI DEVMETHOD(pcib_alloc_msi, rk_pcie_alloc_msi), DEVMETHOD(pcib_release_msi, rk_pcie_release_msi), #endif #ifdef RK_PCIE_ENABLE_MSIX DEVMETHOD(pcib_alloc_msix, rk_pcie_alloc_msix), DEVMETHOD(pcib_release_msix, rk_pcie_release_msix), #endif DEVMETHOD(pcib_map_msi, rk_pcie_map_msi), DEVMETHOD(pcib_get_id, rk_pcie_get_id), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), 
DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, rk_pcie_driver, rk_pcie_methods, sizeof(struct rk_pcie_softc), ofw_pcib_driver); DRIVER_MODULE( rk_pcie, simplebus, rk_pcie_driver, NULL, NULL); diff --git a/sys/arm64/rockchip/rk_pcie_phy.c b/sys/arm64/rockchip/rk_pcie_phy.c index e12fa4970f5a..d4c8c3eb2587 100644 --- a/sys/arm64/rockchip/rk_pcie_phy.c +++ b/sys/arm64/rockchip/rk_pcie_phy.c @@ -1,365 +1,365 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Rockchip PHY TYPEC */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include "syscon_if.h" #define GRF_HIWORD_SHIFT 16 #define GRF_SOC_CON_5_PCIE 0xE214 #define CON_5_PCIE_IDLE_OFF(x) (1 <<(((x) & 0x3) + 3)) #define GRF_SOC_CON8 0xE220 #define GRF_SOC_STATUS1 0xE2A4 /* PHY config registers - write */ #define PHY_CFG_CLK_TEST 0x10 #define CLK_TEST_SEPE_RATE (1 << 3) #define PHY_CFG_CLK_SCC 0x12 #define CLK_SCC_PLL_100M (1 << 3) /* PHY config registers - read */ #define PHY_CFG_PLL_LOCK 0x10 #define CLK_PLL_LOCKED (1 << 1) #define PHY_CFG_SCC_LOCK 0x12 #define CLK_SCC_100M_GATE (1 << 2) #define STATUS1_PLL_LOCKED (1 << 9) static struct ofw_compat_data compat_data[] = { {"rockchip,rk3399-pcie-phy", 1}, {NULL, 0} }; struct rk_pcie_phy_softc { device_t dev; struct syscon *syscon; struct mtx mtx; clk_t clk_ref; hwreset_t hwreset_phy; int enable_count; }; #define PHY_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define PHY_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define PHY_LOCK_INIT(_sc) mtx_init(&(_sc)->mtx, \ device_get_nameunit(_sc->dev), "rk_pcie_phyc", MTX_DEF) #define PHY_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx); #define PHY_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED); #define PHY_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->mtx, MA_NOTOWNED); #define RD4(sc, reg) SYSCON_READ_4((sc)->syscon, (reg)) #define WR4(sc, reg, mask, val) \ SYSCON_WRITE_4((sc)->syscon, (reg), ((mask) << GRF_HIWORD_SHIFT) | (val)) #define MAX_LANE 4 static void cfg_write(struct rk_pcie_phy_softc *sc, uint32_t reg, uint32_t data) { /* setup register address and data first */ WR4(sc, GRF_SOC_CON8, 0x7FF, (reg & 0x3F) << 1 | (data & 0x0F) << 7); /* dummy readback for sync */ RD4(sc, GRF_SOC_CON8); /* Do write pulse */ WR4(sc, GRF_SOC_CON8, 1, 1); RD4(sc, GRF_SOC_CON8); DELAY(10); WR4(sc, GRF_SOC_CON8, 1, 0); RD4(sc, GRF_SOC_CON8); DELAY(10); } static uint32_t 
cfg_read(struct rk_pcie_phy_softc *sc, uint32_t reg) { uint32_t val; WR4(sc, GRF_SOC_CON8, 0x3FF, reg << 1); RD4(sc, GRF_SOC_CON8); DELAY(10); val = RD4(sc, GRF_SOC_STATUS1); return ((val >> 8) & 0x0f); } static int rk_pcie_phy_up(struct rk_pcie_phy_softc *sc, int id) { uint32_t val; int i, rv; PHY_LOCK(sc); sc->enable_count++; if (sc->enable_count != 1) { PHY_UNLOCK(sc); return (0); } rv = hwreset_deassert(sc->hwreset_phy); if (rv != 0) { device_printf(sc->dev, "Cannot deassert 'phy' reset\n"); PHY_UNLOCK(sc); return (rv); } /* Un-idle all lanes */ for (i = 0; i < MAX_LANE; i++) WR4(sc, GRF_SOC_CON_5_PCIE, CON_5_PCIE_IDLE_OFF(i), 0); /* Wait for PLL lock */ for (i = 100; i > 0; i--) { val = cfg_read(sc, PHY_CFG_PLL_LOCK); if (val & CLK_PLL_LOCKED) break; DELAY(1000); } if (i <= 0) { device_printf(sc->dev, "PLL lock timeouted, 0x%02X\n", val); PHY_UNLOCK(sc); return (ETIMEDOUT); } /* Switch PLL to stable 5GHz, rate adjustment is done by divider */ cfg_write(sc, PHY_CFG_CLK_TEST, CLK_TEST_SEPE_RATE); /* Enable 100MHz output for PCIe ref clock */ cfg_write(sc, PHY_CFG_CLK_SCC, CLK_SCC_PLL_100M); /* Wait for ungating of ref clock */ for (i = 100; i > 0; i--) { val = cfg_read(sc, PHY_CFG_SCC_LOCK); if ((val & CLK_SCC_100M_GATE) == 0) break; DELAY(1000); } if (i <= 0) { device_printf(sc->dev, "PLL output enable timeouted\n"); PHY_UNLOCK(sc); return (ETIMEDOUT); } /* Wait for PLL relock (to 5GHz) */ for (i = 100; i > 0; i--) { val = cfg_read(sc, PHY_CFG_PLL_LOCK); if (val & CLK_PLL_LOCKED) break; DELAY(1000); } if (i <= 0) { device_printf(sc->dev, "PLL relock timeouted\n"); PHY_UNLOCK(sc); return (ETIMEDOUT); } PHY_UNLOCK(sc); return (rv); } static int rk_pcie_phy_down(struct rk_pcie_phy_softc *sc, int id) { int rv; PHY_LOCK(sc); rv = 0; if (sc->enable_count <= 0) panic("unpaired enable/disable"); sc->enable_count--; /* Idle given lane */ WR4(sc, GRF_SOC_CON_5_PCIE, CON_5_PCIE_IDLE_OFF(id), CON_5_PCIE_IDLE_OFF(id)); if (sc->enable_count == 0) { rv = 
hwreset_assert(sc->hwreset_phy); if (rv != 0) device_printf(sc->dev, "Cannot assert 'phy' reset\n"); } PHY_UNLOCK(sc); return (rv); } static int rk_pcie_phy_enable(struct phynode *phynode, bool enable) { struct rk_pcie_phy_softc *sc; device_t dev; intptr_t phy; int rv; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (enable) rv = rk_pcie_phy_up(sc, (int)phy); else rv = rk_pcie_phy_down(sc, (int) phy); return (rv); } /* Phy class and methods. */ static phynode_method_t rk_pcie_phy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk_pcie_phy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1( rk_pcie_phy_phynode, rk_pcie_phy_phynode_class, rk_pcie_phy_phynode_methods, 0, phynode_class); static int rk_pcie_phy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Rockchip RK3399 PCIe PHY"); return (BUS_PROBE_DEFAULT); } static int rk_pcie_phy_attach(device_t dev) { struct rk_pcie_phy_softc *sc; struct phynode_init_def phy_init; struct phynode *phynode; phandle_t node; int i, rv; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); PHY_LOCK_INIT(sc); if (SYSCON_GET_HANDLE(sc->dev, &sc->syscon) != 0 || sc->syscon == NULL) { device_printf(dev, "cannot get syscon for device\n"); rv = ENXIO; goto fail; } rv = clk_set_assigned(dev, ofw_bus_get_node(dev)); if (rv != 0 && rv != ENOENT) { device_printf(dev, "clk_set_assigned failed: %d\n", rv); rv = ENXIO; goto fail; } rv = clk_get_by_ofw_name(sc->dev, 0, "refclk", &sc->clk_ref); if (rv != 0) { device_printf(sc->dev, "Cannot get 'refclk' clock\n"); rv = ENXIO; goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "phy", &sc->hwreset_phy); if (rv != 0) { device_printf(sc->dev, "Cannot get 'phy' reset\n"); rv = ENXIO; goto fail; } rv = hwreset_assert(sc->hwreset_phy); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'phy' reset\n"); rv = ENXIO; 
goto fail; } rv = clk_enable(sc->clk_ref); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'ref' clock\n"); rv = ENXIO; goto fail; } for (i = 0; i < MAX_LANE; i++) { phy_init.id = i; phy_init.ofw_node = node; phynode = phynode_create(dev, &rk_pcie_phy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "Cannot create phy[%d]\n", i); rv = ENXIO; goto fail; } if (phynode_register(phynode) == NULL) { device_printf(dev, "Cannot register phy[%d]\n", i); rv = ENXIO; goto fail; } } return (0); fail: return (rv); } static device_method_t rk_pcie_phy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_pcie_phy_probe), DEVMETHOD(device_attach, rk_pcie_phy_attach), DEVMETHOD_END }; DEFINE_CLASS_0(rk_pcie_phy, rk_pcie_phy_driver, rk_pcie_phy_methods, sizeof(struct rk_pcie_phy_softc)); EARLY_DRIVER_MODULE(rk_pcie_phy, simplebus, rk_pcie_phy_driver, NULL, NULL, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c index 0d0d5d130b7f..8b99c384da48 100644 --- a/sys/arm64/rockchip/rk_tsadc.c +++ b/sys/arm64/rockchip/rk_tsadc.c @@ -1,880 +1,880 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Thermometer and thermal zones driver for RockChip SoCs. * Calibration data are taken from Linux, because this part of SoC * is undocumented in TRM. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include "syscon_if.h" #include "rk_tsadc_if.h" /* Version of HW */ #define TSADC_V2 1 #define TSADC_V3 2 #define TSADC_V7 3 /* Global registers */ #define TSADC_USER_CON 0x000 #define TSADC_AUTO_CON 0x004 #define TSADC_AUTO_CON_POL_HI (1 << 8) #define TSADC_AUTO_SRC_EN(x) (1 << (4 + (x))) #define TSADC_AUTO_Q_SEL (1 << 1) /* V3 only */ #define TSADC_AUTO_CON_AUTO (1 << 0) #define TSADC_INT_EN 0x008 #define TSADC_INT_EN_2CRU_EN_SRC(x) (1 << (8 + (x))) #define TSADC_INT_EN_2GPIO_EN_SRC(x) (1 << (4 + (x))) #define TSADC_INT_PD 0x00c #define TSADC_DATA(x) (0x20 + (x) * 0x04) #define TSADC_COMP_INT(x) (0x30 + (x) * 0x04) #define TSADC_COMP_INT_SRC_EN(x) (1 << (0 + (x))) #define TSADC_COMP_SHUT(x) (0x40 + (x) * 0x04) #define TSADC_HIGHT_INT_DEBOUNCE 0x060 #define TSADC_HIGHT_TSHUT_DEBOUNCE 0x064 #define TSADC_AUTO_PERIOD 0x068 #define TSADC_AUTO_PERIOD_HT 0x06c #define TSADC_COMP0_LOW_INT 0x080 /* V3 only */ #define TSADC_COMP1_LOW_INT 0x084 /* V3 only */ /* V3 GFR registers */ #define GRF_SARADC_TESTBIT 0x0e644 #define GRF_SARADC_TESTBIT_ON (0x10001 << 2) #define GRF_TSADC_TESTBIT_L 0x0e648 #define GRF_TSADC_VCM_EN_L (0x10001 << 7) #define 
GRF_TSADC_TESTBIT_H 0x0e64c #define GRF_TSADC_VCM_EN_H (0x10001 << 7) #define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2) /* V7 GRF register */ #define GRF_TSADC_CON 0x0600 #define GRF_TSADC_ANA_REG0 (0x10001 << 0) #define GRF_TSADC_ANA_REG1 (0x10001 << 1) #define GRF_TSADC_ANA_REG2 (0x10001 << 2) #define GRF_TSADC_TSEN (0x10001 << 8) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) static struct sysctl_ctx_list tsadc_sysctl_ctx; struct tsensor { char *name; int id; int channel; }; struct rk_calib_entry { uint32_t raw; int temp; }; struct tsadc_calib_info { struct rk_calib_entry *table; int nentries; }; struct tsadc_conf { int version; int q_sel_ntc; int shutdown_temp; int shutdown_mode; int shutdown_pol; struct tsensor *tsensors; int ntsensors; struct tsadc_calib_info calib_info; }; struct tsadc_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; clk_t tsadc_clk; clk_t apb_pclk_clk; hwreset_array_t hwreset; struct syscon *grf; struct tsadc_conf *conf; int shutdown_temp; int shutdown_mode; int shutdown_pol; int alarm_temp; }; static struct rk_calib_entry rk3288_calib_data[] = { {3800, -40000}, {3792, -35000}, {3783, -30000}, {3774, -25000}, {3765, -20000}, {3756, -15000}, {3747, -10000}, {3737, -5000}, {3728, 0}, {3718, 5000}, {3708, 10000}, {3698, 15000}, {3688, 20000}, {3678, 25000}, {3667, 30000}, {3656, 35000}, {3645, 40000}, {3634, 45000}, {3623, 50000}, {3611, 55000}, {3600, 60000}, {3588, 65000}, {3575, 70000}, {3563, 75000}, {3550, 80000}, {3537, 85000}, {3524, 90000}, {3510, 95000}, {3496, 100000}, {3482, 105000}, {3467, 110000}, {3452, 115000}, {3437, 120000}, {3421, 125000}, }; struct tsensor rk3288_tsensors[] = { { .channel = 0, .id = 2, .name = "reserved"}, { .channel = 1, .id = 0, .name = "CPU"}, { .channel = 2, .id = 1, .name = "GPU"}, }; struct tsadc_conf rk3288_tsadc_conf = { .version = TSADC_V2, .q_sel_ntc = 0, .shutdown_temp = 95000, .shutdown_mode 
= 1, /* GPIO */ .shutdown_pol = 0, /* Low */ .tsensors = rk3288_tsensors, .ntsensors = nitems(rk3288_tsensors), .calib_info = { .table = rk3288_calib_data, .nentries = nitems(rk3288_calib_data), } }; static struct rk_calib_entry rk3328_calib_data[] = { {296, -40000}, {304, -35000}, {313, -30000}, {331, -20000}, {340, -15000}, {349, -10000}, {359, -5000}, {368, 0}, {378, 5000}, {388, 10000}, {398, 15000}, {408, 20000}, {418, 25000}, {429, 30000}, {440, 35000}, {451, 40000}, {462, 45000}, {473, 50000}, {485, 55000}, {496, 60000}, {508, 65000}, {521, 70000}, {533, 75000}, {546, 80000}, {559, 85000}, {572, 90000}, {586, 95000}, {600, 100000}, {614, 105000}, {629, 110000}, {644, 115000}, {659, 120000}, {675, 125000}, }; static struct tsensor rk3328_tsensors[] = { { .channel = 0, .id = 0, .name = "CPU"}, }; static struct tsadc_conf rk3328_tsadc_conf = { .version = TSADC_V2, .q_sel_ntc = 1, .shutdown_temp = 95000, .shutdown_mode = 0, /* CRU */ .shutdown_pol = 0, /* Low */ .tsensors = rk3328_tsensors, .ntsensors = nitems(rk3328_tsensors), .calib_info = { .table = rk3328_calib_data, .nentries = nitems(rk3328_calib_data), } }; static struct rk_calib_entry rk3399_calib_data[] = { {402, -40000}, {410, -35000}, {419, -30000}, {427, -25000}, {436, -20000}, {444, -15000}, {453, -10000}, {461, -5000}, {470, 0}, {478, 5000}, {487, 10000}, {496, 15000}, {504, 20000}, {513, 25000}, {521, 30000}, {530, 35000}, {538, 40000}, {547, 45000}, {555, 50000}, {564, 55000}, {573, 60000}, {581, 65000}, {590, 70000}, {599, 75000}, {607, 80000}, {616, 85000}, {624, 90000}, {633, 95000}, {642, 100000}, {650, 105000}, {659, 110000}, {668, 115000}, {677, 120000}, {685, 125000}, }; static struct tsensor rk3399_tsensors[] = { { .channel = 0, .id = 0, .name = "CPU"}, { .channel = 1, .id = 1, .name = "GPU"}, }; static struct tsadc_conf rk3399_tsadc_conf = { .version = TSADC_V3, .q_sel_ntc = 1, .shutdown_temp = 95000, .shutdown_mode = 1, /* GPIO */ .shutdown_pol = 0, /* Low */ .tsensors = 
rk3399_tsensors, .ntsensors = nitems(rk3399_tsensors), .calib_info = { .table = rk3399_calib_data, .nentries = nitems(rk3399_calib_data), } }; static struct rk_calib_entry rk3568_calib_data[] = { {0, -40000}, {1584, -40000}, {1620, -35000}, {1652, -30000}, {1688, -25000}, {1720, -20000}, {1756, -15000}, {1788, -10000}, {1824, -5000}, {1856, 0}, {1892, 5000}, {1924, 10000}, {1956, 15000}, {1992, 20000}, {2024, 25000}, {2060, 30000}, {2092, 35000}, {2128, 40000}, {2160, 45000}, {2196, 50000}, {2228, 55000}, {2264, 60000}, {2300, 65000}, {2332, 70000}, {2368, 75000}, {2400, 80000}, {2436, 85000}, {2468, 90000}, {2500, 95000}, {2536, 100000}, {2572, 105000}, {2604, 110000}, {2636, 115000}, {2672, 120000}, {2704, 125000}, }; static struct tsensor rk3568_tsensors[] = { { .channel = 0, .id = 0, .name = "CPU"}, { .channel = 1, .id = 1, .name = "GPU"}, }; static struct tsadc_conf rk3568_tsadc_conf = { .version = TSADC_V7, .q_sel_ntc = 1, .shutdown_temp = 95000, .shutdown_mode = 1, /* GPIO */ .shutdown_pol = 0, /* Low */ .tsensors = rk3568_tsensors, .ntsensors = nitems(rk3568_tsensors), .calib_info = { .table = rk3568_calib_data, .nentries = nitems(rk3568_calib_data), } }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-tsadc", (uintptr_t)&rk3288_tsadc_conf}, {"rockchip,rk3328-tsadc", (uintptr_t)&rk3328_tsadc_conf}, {"rockchip,rk3399-tsadc", (uintptr_t)&rk3399_tsadc_conf}, {"rockchip,rk3568-tsadc", (uintptr_t)&rk3568_tsadc_conf}, {NULL, 0} }; static uint32_t tsadc_temp_to_raw(struct tsadc_softc *sc, int temp) { struct rk_calib_entry *tbl; int denom, ntbl, raw, i; tbl = sc->conf->calib_info.table; ntbl = sc->conf->calib_info.nentries; if (temp <= tbl[0].temp) return (tbl[0].raw); if (temp >= tbl[ntbl - 1].temp) return (tbl[ntbl - 1].raw); for (i = 1; i < (ntbl - 1); i++) { /* Exact match */ if (temp == tbl[i].temp) return (tbl[i].raw); if (temp < tbl[i].temp) break; } /* * Translated value is between i and i - 1 table entries. 
* Do linear interpolation for it. */ raw = (int)tbl[i - 1].raw - (int)tbl[i].raw; raw *= temp - tbl[i - 1].temp; denom = tbl[i - 1].temp - tbl[i].temp; raw = tbl[i - 1].raw + raw / denom; return (raw); } static int tsadc_raw_to_temp(struct tsadc_softc *sc, uint32_t raw) { struct rk_calib_entry *tbl; int denom, ntbl, temp, i; bool descending; tbl = sc->conf->calib_info.table; ntbl = sc->conf->calib_info.nentries; descending = tbl[0].raw > tbl[1].raw; if (descending) { /* Raw column is in descending order. */ if (raw >= tbl[0].raw) return (tbl[0].temp); if (raw <= tbl[ntbl - 1].raw) return (tbl[ntbl - 1].temp); for (i = ntbl - 2; i > 0; i--) { /* Exact match */ if (raw == tbl[i].raw) return (tbl[i].temp); if (raw < tbl[i].raw) break; } } else { /* Raw column is in ascending order. */ if (raw <= tbl[0].raw) return (tbl[0].temp); if (raw >= tbl[ntbl - 1].raw) return (tbl[ntbl - 1].temp); for (i = 1; i < (ntbl - 1); i++) { /* Exact match */ if (raw == tbl[i].raw) return (tbl[i].temp); if (raw < tbl[i].raw) break; } } /* * Translated value is between i and i - 1 table entries. * Do linear interpolation for it. 
*/ temp = (int)tbl[i - 1].temp - (int)tbl[i].temp; temp *= raw - tbl[i - 1].raw; denom = tbl[i - 1].raw - tbl[i].raw; temp = tbl[i - 1].temp + temp / denom; return (temp); } static void tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor) { uint32_t val; /* Shutdown mode */ val = RD4(sc, TSADC_INT_EN); if (sc->shutdown_mode != 0) { /* Signal shutdown of GPIO pin */ val &= ~TSADC_INT_EN_2CRU_EN_SRC(sensor->channel); val |= TSADC_INT_EN_2GPIO_EN_SRC(sensor->channel); } else { val |= TSADC_INT_EN_2CRU_EN_SRC(sensor->channel); val &= ~TSADC_INT_EN_2GPIO_EN_SRC(sensor->channel); } WR4(sc, TSADC_INT_EN, val); /* Shutdown temperature */ val = tsadc_raw_to_temp(sc, sc->shutdown_temp); WR4(sc, TSADC_COMP_SHUT(sensor->channel), val); val = RD4(sc, TSADC_AUTO_CON); val |= TSADC_AUTO_SRC_EN(sensor->channel); WR4(sc, TSADC_AUTO_CON, val); /* Alarm temperature */ val = tsadc_temp_to_raw(sc, sc->alarm_temp); WR4(sc, TSADC_COMP_INT(sensor->channel), val); val = RD4(sc, TSADC_INT_EN); val |= TSADC_COMP_INT_SRC_EN(sensor->channel); WR4(sc, TSADC_INT_EN, val); } static void tsadc_init(struct tsadc_softc *sc) { uint32_t val; /* Common part */ val = 0; /* XXX Is this right? 
*/ if (sc->shutdown_pol != 0) val |= TSADC_AUTO_CON_POL_HI; else val &= ~TSADC_AUTO_CON_POL_HI; if (sc->conf->q_sel_ntc) val |= TSADC_AUTO_Q_SEL; WR4(sc, TSADC_AUTO_CON, val); switch (sc->conf->version) { case TSADC_V2: /* V2 init */ WR4(sc, TSADC_AUTO_PERIOD, 250); /* 250 ms */ WR4(sc, TSADC_AUTO_PERIOD_HT, 50); /* 50 ms */ WR4(sc, TSADC_HIGHT_INT_DEBOUNCE, 4); WR4(sc, TSADC_HIGHT_TSHUT_DEBOUNCE, 4); break; case TSADC_V3: /* V3 init */ if (sc->grf == NULL) { /* Errata: adjust interleave to working value */ WR4(sc, TSADC_USER_CON, 13 << 6); /* 13 clks */ } else { SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_VCM_EN_L); SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_VCM_EN_H); DELAY(30); /* 15 usec min */ SYSCON_WRITE_4(sc->grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON); SYSCON_WRITE_4(sc->grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON); DELAY(180); /* 90 usec min */ } WR4(sc, TSADC_AUTO_PERIOD, 1875); /* 2.5 ms */ WR4(sc, TSADC_AUTO_PERIOD_HT, 1875); /* 2.5 ms */ WR4(sc, TSADC_HIGHT_INT_DEBOUNCE, 4); WR4(sc, TSADC_HIGHT_TSHUT_DEBOUNCE, 4); break; case TSADC_V7: /* V7 init */ WR4(sc, TSADC_USER_CON, 0xfc0); /* 97us, at least 90us */ WR4(sc, TSADC_AUTO_PERIOD, 1622); /* 2.5ms */ WR4(sc, TSADC_HIGHT_INT_DEBOUNCE, 4); WR4(sc, TSADC_AUTO_PERIOD_HT, 1622); /* 2.5ms */ WR4(sc, TSADC_HIGHT_TSHUT_DEBOUNCE, 4); if (sc->grf) { SYSCON_WRITE_4(sc->grf, GRF_TSADC_CON, GRF_TSADC_TSEN); DELAY(15); /* 10 usec min */ SYSCON_WRITE_4(sc->grf, GRF_TSADC_CON, GRF_TSADC_ANA_REG0); SYSCON_WRITE_4(sc->grf, GRF_TSADC_CON, GRF_TSADC_ANA_REG1); SYSCON_WRITE_4(sc->grf, GRF_TSADC_CON, GRF_TSADC_ANA_REG2); DELAY(100); /* 90 usec min */ } break; } } static int tsadc_read_temp(struct tsadc_softc *sc, struct tsensor *sensor, int *temp) { uint32_t val; val = RD4(sc, TSADC_DATA(sensor->channel)); *temp = tsadc_raw_to_temp(sc, val); #ifdef DEBUG device_printf(sc->dev, "%s: Sensor(id: %d, ch: %d), val: %d temp: %d\n", __func__, sensor->id, sensor->channel, val, *temp); 
device_printf(sc->dev, "%s: user_con=0x%08x auto_con=0x%08x " "comp_int=0x%08x comp_shut=0x%08x\n", __func__, RD4(sc, TSADC_USER_CON), RD4(sc, TSADC_AUTO_CON), RD4(sc, TSADC_COMP_INT(sensor->channel)), RD4(sc, TSADC_COMP_SHUT(sensor->channel))); #endif return (0); } static int tsadc_get_temp(device_t dev, device_t cdev, uintptr_t id, int *val) { struct tsadc_softc *sc; int i, rv; sc = device_get_softc(dev); if (id >= sc->conf->ntsensors) return (ERANGE); for (i = 0; i < sc->conf->ntsensors; i++) { if (sc->conf->tsensors->id == id) { rv =tsadc_read_temp(sc, sc->conf->tsensors + id, val); return (rv); } } return (ERANGE); } static int tsadc_sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct tsadc_softc *sc; int val; int rv; int id; /* Write request */ if (req->newptr != NULL) return (EINVAL); sc = arg1; id = arg2; if (id >= sc->conf->ntsensors) return (ERANGE); rv = tsadc_read_temp(sc, sc->conf->tsensors + id, &val); if (rv != 0) return (rv); val = val / 100; val += 2731; rv = sysctl_handle_int(oidp, &val, 0, req); return (rv); } static int tsadc_init_sysctl(struct tsadc_softc *sc) { int i; struct sysctl_oid *oid, *tmp; sysctl_ctx_init(&tsadc_sysctl_ctx); /* create node for hw.temp */ oid = SYSCTL_ADD_NODE(&tsadc_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "temperature", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); if (oid == NULL) return (ENXIO); /* Add sensors */ for (i = sc->conf->ntsensors - 1; i >= 0; i--) { tmp = SYSCTL_ADD_PROC(&tsadc_sysctl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, sc->conf->tsensors[i].name, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, tsadc_sysctl_temperature, "IK", "SoC Temperature"); if (tmp == NULL) return (ENXIO); } return (0); } static int tsadc_intr(void *arg) { struct tsadc_softc *sc; uint32_t val; sc = (struct tsadc_softc *)arg; val = RD4(sc, TSADC_INT_PD); WR4(sc, TSADC_INT_PD, val); /* XXX Handle shutdown and alarm interrupts. 
*/ if (val & 0x00F0) { device_printf(sc->dev, "Alarm: device temperature " "is above of shutdown level.\n"); } else if (val & 0x000F) { device_printf(sc->dev, "Alarm: device temperature " "is above of alarm level.\n"); } return (FILTER_HANDLED); } static int tsadc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip temperature sensors"); return (BUS_PROBE_DEFAULT); } static int tsadc_attach(device_t dev) { struct tsadc_softc *sc; phandle_t node; uint32_t val; int i, rid, rv; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); sc->conf = (struct tsadc_conf *) ofw_bus_search_compatible(dev, compat_data)->ocd_data; sc->alarm_temp = 90000; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } if ((bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, tsadc_intr, NULL, sc, &sc->irq_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); goto fail; } /* FDT resources */ rv = hwreset_array_get_ofw(dev, 0, &sc->hwreset); if (rv != 0) { device_printf(dev, "Cannot get resets\n"); goto fail; } rv = clk_get_by_ofw_name(dev, 0, "tsadc", &sc->tsadc_clk); if (rv != 0) { device_printf(dev, "Cannot get 'tsadc' clock: %d\n", rv); goto fail; } rv = clk_get_by_ofw_name(dev, 0, "apb_pclk", &sc->apb_pclk_clk); if (rv != 0) { device_printf(dev, "Cannot get 'apb_pclk' clock: %d\n", rv); goto fail; } /* grf is optional */ rv = syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf); if (rv != 0 && rv != ENOENT) { device_printf(dev, "Cannot get 'grf' syscon: %d\n", rv); goto fail; } rv = 
OF_getencprop(node, "rockchip,hw-tshut-temp", &sc->shutdown_temp, sizeof(sc->shutdown_temp)); if (rv <= 0) sc->shutdown_temp = sc->conf->shutdown_temp; rv = OF_getencprop(node, "rockchip,hw-tshut-mode", &sc->shutdown_mode, sizeof(sc->shutdown_mode)); if (rv <= 0) sc->shutdown_mode = sc->conf->shutdown_mode; rv = OF_getencprop(node, "rockchip,hw-tshut-polarity", &sc->shutdown_pol, sizeof(sc->shutdown_pol)); if (rv <= 0) sc->shutdown_pol = sc->conf->shutdown_pol; /* Wakeup controller */ rv = hwreset_array_assert(sc->hwreset); if (rv != 0) { device_printf(dev, "Cannot assert reset\n"); goto fail; } /* Set the assigned clocks parent and freq */ rv = clk_set_assigned(sc->dev, node); if (rv != 0 && rv != ENOENT) { device_printf(dev, "clk_set_assigned failed\n"); goto fail; } rv = clk_enable(sc->tsadc_clk); if (rv != 0) { device_printf(dev, "Cannot enable 'tsadc_clk' clock: %d\n", rv); goto fail; } rv = clk_enable(sc->apb_pclk_clk); if (rv != 0) { device_printf(dev, "Cannot enable 'apb_pclk' clock: %d\n", rv); goto fail; } rv = hwreset_array_deassert(sc->hwreset); if (rv != 0) { device_printf(dev, "Cannot deassert reset\n"); goto fail; } tsadc_init(sc); for (i = 0; i < sc->conf->ntsensors; i++) tsadc_init_tsensor(sc, sc->conf->tsensors + i); /* Enable auto mode */ val = RD4(sc, TSADC_AUTO_CON); val |= TSADC_AUTO_CON_AUTO; WR4(sc, TSADC_AUTO_CON, val); rv = tsadc_init_sysctl(sc); if (rv != 0) { device_printf(sc->dev, "Cannot initialize sysctls\n"); goto fail_sysctl; } OF_device_register_xref(OF_xref_from_node(node), dev); return (bus_generic_attach(dev)); fail_sysctl: sysctl_ctx_free(&tsadc_sysctl_ctx); fail: if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->tsadc_clk != NULL) clk_release(sc->tsadc_clk); if (sc->apb_pclk_clk != NULL) clk_release(sc->apb_pclk_clk); if (sc->hwreset != NULL) hwreset_array_release(sc->hwreset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) 
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int tsadc_detach(device_t dev) { struct tsadc_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sysctl_ctx_free(&tsadc_sysctl_ctx); if (sc->tsadc_clk != NULL) clk_release(sc->tsadc_clk); if (sc->apb_pclk_clk != NULL) clk_release(sc->apb_pclk_clk); if (sc->hwreset != NULL) hwreset_array_release(sc->hwreset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static device_method_t rk_tsadc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tsadc_probe), DEVMETHOD(device_attach, tsadc_attach), DEVMETHOD(device_detach, tsadc_detach), /* TSADC interface */ DEVMETHOD(rk_tsadc_get_temperature, tsadc_get_temp), DEVMETHOD_END }; static DEFINE_CLASS_0(rk_tsadc, rk_tsadc_driver, rk_tsadc_methods, sizeof(struct tsadc_softc)); EARLY_DRIVER_MODULE(rk_tsadc, simplebus, rk_tsadc_driver, NULL, NULL, BUS_PASS_TIMER + BUS_PASS_ORDER_LAST); diff --git a/sys/arm64/rockchip/rk_typec_phy.c b/sys/arm64/rockchip/rk_typec_phy.c index 0998199d8c4e..6e75394377e4 100644 --- a/sys/arm64/rockchip/rk_typec_phy.c +++ b/sys/arm64/rockchip/rk_typec_phy.c @@ -1,470 +1,470 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Rockchip PHY TYPEC */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "syscon_if.h" #define GRF_USB3OTG_BASE(x) (0x2430 + (0x10 * x)) #define GRF_USB3OTG_CON0(x) (GRF_USB3OTG_BASE(x) + 0x0) #define GRF_USB3OTG_CON1(x) (GRF_USB3OTG_BASE(x) + 0x4) #define USB3OTG_CON1_U3_DIS (1 << 0) #define GRF_USB3PHY_BASE(x) (0x0e580 + (0xc * (x))) #define GRF_USB3PHY_CON0(x) (GRF_USB3PHY_BASE(x) + 0x0) #define USB3PHY_CON0_USB2_ONLY (1 << 3) #define GRF_USB3PHY_CON1(x) (GRF_USB3PHY_BASE(x) + 0x4) #define GRF_USB3PHY_CON2(x) (GRF_USB3PHY_BASE(x) + 0x8) #define GRF_USB3PHY_STATUS0 0x0e5c0 #define GRF_USB3PHY_STATUS1 0x0e5c4 #define CMN_PLL0_VCOCAL_INIT (0x84 << 2) #define CMN_PLL0_VCOCAL_ITER (0x85 << 2) #define CMN_PLL0_INTDIV (0x94 << 2) #define CMN_PLL0_FRACDIV (0x95 << 2) #define CMN_PLL0_HIGH_THR (0x96 << 2) #define CMN_PLL0_DSM_DIAG (0x97 << 2) #define CMN_PLL0_SS_CTRL1 (0x98 << 2) #define CMN_PLL0_SS_CTRL2 (0x99 << 2) #define CMN_DIAG_PLL0_FBH_OVRD (0x1c0 << 2) #define CMN_DIAG_PLL0_FBL_OVRD (0x1c1 << 2) #define CMN_DIAG_PLL0_OVRD (0x1c2 << 2) #define CMN_DIAG_PLL0_V2I_TUNE (0x1c5 << 2) #define 
CMN_DIAG_PLL0_CP_TUNE (0x1c6 << 2) #define CMN_DIAG_PLL0_LF_PROG (0x1c7 << 2) #define CMN_DIAG_HSCLK_SEL (0x1e0 << 2) #define CMN_DIAG_HSCLK_SEL_PLL_CONFIG 0x30 #define CMN_DIAG_HSCLK_SEL_PLL_MASK 0x33 #define TX_TXCC_MGNFS_MULT_000(lane) ((0x4050 | ((lane) << 9)) << 2) #define XCVR_DIAG_BIDI_CTRL(lane) ((0x40e8 | ((lane) << 9)) << 2) #define XCVR_DIAG_LANE_FCM_EN_MGN(lane) ((0x40f2 | ((lane) << 9)) << 2) #define TX_PSC_A0(lane) ((0x4100 | ((lane) << 9)) << 2) #define TX_PSC_A1(lane) ((0x4101 | ((lane) << 9)) << 2) #define TX_PSC_A2(lane) ((0x4102 | ((lane) << 9)) << 2) #define TX_PSC_A3(lane) ((0x4103 | ((lane) << 9)) << 2) #define TX_RCVDET_EN_TMR(lane) ((0x4122 | ((lane) << 9)) << 2) #define TX_RCVDET_ST_TMR(lane) ((0x4123 | ((lane) << 9)) << 2) #define RX_PSC_A0(lane) ((0x8000 | ((lane) << 9)) << 2) #define RX_PSC_A1(lane) ((0x8001 | ((lane) << 9)) << 2) #define RX_PSC_A2(lane) ((0x8002 | ((lane) << 9)) << 2) #define RX_PSC_A3(lane) ((0x8003 | ((lane) << 9)) << 2) #define RX_PSC_CAL(lane) ((0x8006 | ((lane) << 9)) << 2) #define RX_PSC_RDY(lane) ((0x8007 | ((lane) << 9)) << 2) #define RX_SIGDET_HL_FILT_TMR(lane) ((0x8090 | ((lane) << 9)) << 2) #define RX_REE_CTRL_DATA_MASK(lane) ((0x81bb | ((lane) << 9)) << 2) #define RX_DIAG_SIGDET_TUNE(lane) ((0x81dc | ((lane) << 9)) << 2) #define PMA_LANE_CFG (0xc000 << 2) #define PIN_ASSIGN_D_F 0x5100 #define DP_MODE_CTL (0xc008 << 2) #define DP_MODE_ENTER_A2 0xc104 #define PMA_CMN_CTRL1 (0xc800 << 2) #define PMA_CMN_CTRL1_READY (1 << 0) static struct ofw_compat_data compat_data[] = { { "rockchip,rk3399-typec-phy", 1 }, { NULL, 0 } }; static struct resource_spec rk_typec_phy_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct rk_typec_phy_softc { device_t dev; struct resource *res; struct syscon *grf; clk_t tcpdcore; clk_t tcpdphy_ref; hwreset_t rst_uphy; hwreset_t rst_pipe; hwreset_t rst_tcphy; int mode; int phy_ctrl_id; }; #define RK_TYPEC_PHY_READ(sc, reg) bus_read_4(sc->res, (reg)) #define 
RK_TYPEC_PHY_WRITE(sc, reg, val) bus_write_4(sc->res, (reg), (val)) /* Phy class and methods. */ static int rk_typec_phy_enable(struct phynode *phynode, bool enable); static int rk_typec_phy_get_mode(struct phynode *phy, int *mode); static int rk_typec_phy_set_mode(struct phynode *phy, int mode); static phynode_method_t rk_typec_phy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk_typec_phy_enable), PHYNODEMETHOD(phynode_usb_get_mode, rk_typec_phy_get_mode), PHYNODEMETHOD(phynode_usb_set_mode, rk_typec_phy_set_mode), PHYNODEMETHOD_END }; DEFINE_CLASS_1(rk_typec_phy_phynode, rk_typec_phy_phynode_class, rk_typec_phy_phynode_methods, sizeof(struct phynode_usb_sc), phynode_usb_class); enum RK3399_USBPHY { RK3399_TYPEC_PHY_DP = 0, RK3399_TYPEC_PHY_USB3, }; static void rk_typec_phy_set_usb2_only(struct rk_typec_phy_softc *sc, bool usb2only) { uint32_t reg; /* Disable usb3tousb2 only */ reg = SYSCON_READ_4(sc->grf, GRF_USB3PHY_CON0(sc->phy_ctrl_id)); if (usb2only) reg |= USB3PHY_CON0_USB2_ONLY; else reg &= ~USB3PHY_CON0_USB2_ONLY; /* Write Mask */ reg |= (USB3PHY_CON0_USB2_ONLY) << 16; SYSCON_WRITE_4(sc->grf, GRF_USB3PHY_CON0(sc->phy_ctrl_id), reg); /* Enable the USB3 Super Speed port */ reg = SYSCON_READ_4(sc->grf, GRF_USB3OTG_CON1(sc->phy_ctrl_id)); if (usb2only) reg |= USB3OTG_CON1_U3_DIS; else reg &= ~USB3OTG_CON1_U3_DIS; /* Write Mask */ reg |= (USB3OTG_CON1_U3_DIS) << 16; SYSCON_WRITE_4(sc->grf, GRF_USB3OTG_CON1(sc->phy_ctrl_id), reg); } static int rk_typec_phy_enable(struct phynode *phynode, bool enable) { struct rk_typec_phy_softc *sc; device_t dev; intptr_t phy; uint32_t reg; int err, retry; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK3399_TYPEC_PHY_USB3) return (ERANGE); rk_typec_phy_set_usb2_only(sc, false); err = clk_enable(sc->tcpdcore); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->tcpdcore)); return (ENXIO); } err = clk_enable(sc->tcpdphy_ref); if (err 
!= 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->tcpdphy_ref)); clk_disable(sc->tcpdcore); return (ENXIO); } hwreset_deassert(sc->rst_tcphy); /* 24M configuration, magic values from rockchip */ RK_TYPEC_PHY_WRITE(sc, PMA_CMN_CTRL1, 0x830); for (int i = 0; i < 4; i++) { RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_LANE_FCM_EN_MGN(i), 0x90); RK_TYPEC_PHY_WRITE(sc, TX_RCVDET_EN_TMR(i), 0x960); RK_TYPEC_PHY_WRITE(sc, TX_RCVDET_ST_TMR(i), 0x30); } reg = RK_TYPEC_PHY_READ(sc, CMN_DIAG_HSCLK_SEL); reg &= ~CMN_DIAG_HSCLK_SEL_PLL_MASK; reg |= CMN_DIAG_HSCLK_SEL_PLL_CONFIG; RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_HSCLK_SEL, reg); /* PLL configuration, magic values from rockchip */ RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_VCOCAL_INIT, 0xf0); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_VCOCAL_ITER, 0x18); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_INTDIV, 0xd0); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_FRACDIV, 0x4a4a); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_HIGH_THR, 0x34); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_SS_CTRL1, 0x1ee); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_SS_CTRL2, 0x7f03); RK_TYPEC_PHY_WRITE(sc, CMN_PLL0_DSM_DIAG, 0x20); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_OVRD, 0); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_FBH_OVRD, 0); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_FBL_OVRD, 0); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_V2I_TUNE, 0x7); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_CP_TUNE, 0x45); RK_TYPEC_PHY_WRITE(sc, CMN_DIAG_PLL0_LF_PROG, 0x8); /* Configure the TX and RX line, magic values from rockchip */ RK_TYPEC_PHY_WRITE(sc, TX_PSC_A0(0), 0x7799); RK_TYPEC_PHY_WRITE(sc, TX_PSC_A1(0), 0x7798); RK_TYPEC_PHY_WRITE(sc, TX_PSC_A2(0), 0x5098); RK_TYPEC_PHY_WRITE(sc, TX_PSC_A3(0), 0x5098); RK_TYPEC_PHY_WRITE(sc, TX_TXCC_MGNFS_MULT_000(0), 0x0); RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_BIDI_CTRL(0), 0xbf); RK_TYPEC_PHY_WRITE(sc, RX_PSC_A0(1), 0xa6fd); RK_TYPEC_PHY_WRITE(sc, RX_PSC_A1(1), 0xa6fd); RK_TYPEC_PHY_WRITE(sc, RX_PSC_A2(1), 0xa410); RK_TYPEC_PHY_WRITE(sc, RX_PSC_A3(1), 0x2410); RK_TYPEC_PHY_WRITE(sc, RX_PSC_CAL(1), 0x23ff); 
RK_TYPEC_PHY_WRITE(sc, RX_SIGDET_HL_FILT_TMR(1), 0x13); RK_TYPEC_PHY_WRITE(sc, RX_REE_CTRL_DATA_MASK(1), 0x03e7); RK_TYPEC_PHY_WRITE(sc, RX_DIAG_SIGDET_TUNE(1), 0x1004); RK_TYPEC_PHY_WRITE(sc, RX_PSC_RDY(1), 0x2010); RK_TYPEC_PHY_WRITE(sc, XCVR_DIAG_BIDI_CTRL(1), 0xfb); RK_TYPEC_PHY_WRITE(sc, PMA_LANE_CFG, PIN_ASSIGN_D_F); RK_TYPEC_PHY_WRITE(sc, DP_MODE_CTL, DP_MODE_ENTER_A2); hwreset_deassert(sc->rst_uphy); for (retry = 10000; retry > 0; retry--) { reg = RK_TYPEC_PHY_READ(sc, PMA_CMN_CTRL1); if (reg & PMA_CMN_CTRL1_READY) break; DELAY(10); } if (retry == 0) { device_printf(sc->dev, "Timeout waiting for PMA\n"); return (ENXIO); } hwreset_deassert(sc->rst_pipe); return (0); } static int rk_typec_phy_get_mode(struct phynode *phynode, int *mode) { struct rk_typec_phy_softc *sc; intptr_t phy; device_t dev; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK3399_TYPEC_PHY_USB3) return (ERANGE); *mode = sc->mode; return (0); } static int rk_typec_phy_set_mode(struct phynode *phynode, int mode) { struct rk_typec_phy_softc *sc; intptr_t phy; device_t dev; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK3399_TYPEC_PHY_USB3) return (ERANGE); sc->mode = mode; return (0); } static int rk_typec_phy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Rockchip RK3399 PHY TYPEC"); return (BUS_PROBE_DEFAULT); } static int rk_typec_phy_attach(device_t dev) { struct rk_typec_phy_softc *sc; struct phynode_init_def phy_init; struct phynode *phynode; phandle_t node, usb3; phandle_t reg_prop[4]; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); /* * Find out which phy we are. * There is not property for this so we need to know the * address to use the correct GRF registers. 
*/ if (OF_getencprop(node, "reg", reg_prop, sizeof(reg_prop)) <= 0) { device_printf(dev, "Cannot guess phy controller id\n"); return (ENXIO); } switch (reg_prop[1]) { case 0xff7c0000: sc->phy_ctrl_id = 0; break; case 0xff800000: sc->phy_ctrl_id = 1; break; default: device_printf(dev, "Unknown address %x for typec-phy\n", reg_prop[1]); return (ENXIO); } if (bus_alloc_resources(dev, rk_typec_phy_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); goto fail; } if (syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf) != 0) { device_printf(dev, "Cannot get syscon handle\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "tcpdcore", &sc->tcpdcore) != 0) { device_printf(dev, "Cannot get tcpdcore clock\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "tcpdphy-ref", &sc->tcpdphy_ref) != 0) { device_printf(dev, "Cannot get tcpdphy-ref clock\n"); goto fail; } if (hwreset_get_by_ofw_name(dev, 0, "uphy", &sc->rst_uphy) != 0) { device_printf(dev, "Cannot get uphy reset\n"); goto fail; } if (hwreset_get_by_ofw_name(dev, 0, "uphy-pipe", &sc->rst_pipe) != 0) { device_printf(dev, "Cannot get uphy-pipe reset\n"); goto fail; } if (hwreset_get_by_ofw_name(dev, 0, "uphy-tcphy", &sc->rst_tcphy) != 0) { device_printf(dev, "Cannot get uphy-tcphy reset\n"); goto fail; } /* * Make sure that the module is asserted * We need to deassert in a certain order when we enable the phy */ hwreset_assert(sc->rst_uphy); hwreset_assert(sc->rst_pipe); hwreset_assert(sc->rst_tcphy); /* Set the assigned clocks parent and freq */ if (clk_set_assigned(dev, node) != 0) { device_printf(dev, "clk_set_assigned failed\n"); goto fail; } /* Only usb3 port is supported right now */ usb3 = ofw_bus_find_child(node, "usb3-port"); if (usb3 == 0) { device_printf(dev, "Cannot find usb3-port child node\n"); goto fail; } /* If the child isn't enable attach the driver * but do not register the PHY. 
*/ if (!ofw_bus_node_status_okay(usb3)) return (0); phy_init.id = RK3399_TYPEC_PHY_USB3; phy_init.ofw_node = usb3; phynode = phynode_create(dev, &rk_typec_phy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "failed to create phy usb3-port\n"); goto fail; } if (phynode_register(phynode) == NULL) { device_printf(dev, "failed to register phy usb3-port\n"); goto fail; } OF_device_register_xref(OF_xref_from_node(usb3), dev); return (0); fail: bus_release_resources(dev, rk_typec_phy_spec, &sc->res); return (ENXIO); } static device_method_t rk_typec_phy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_typec_phy_probe), DEVMETHOD(device_attach, rk_typec_phy_attach), DEVMETHOD_END }; static driver_t rk_typec_phy_driver = { "rk_typec_phy", rk_typec_phy_methods, sizeof(struct rk_typec_phy_softc) }; EARLY_DRIVER_MODULE(rk_typec_phy, simplebus, rk_typec_phy_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(rk_typec_phy, 1); diff --git a/sys/arm64/rockchip/rk_usb2phy.c b/sys/arm64/rockchip/rk_usb2phy.c index f57e3157dc12..367d4fef61cc 100644 --- a/sys/arm64/rockchip/rk_usb2phy.c +++ b/sys/arm64/rockchip/rk_usb2phy.c @@ -1,430 +1,430 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Rockchip USB2PHY */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "clkdev_if.h" #include "syscon_if.h" struct rk_usb2phy_reg { uint32_t offset; uint32_t enable_mask; uint32_t disable_mask; }; struct rk_usb2phy_regs { struct rk_usb2phy_reg clk_ctl; }; struct rk_usb2phy_regs rk3399_regs = { .clk_ctl = { .offset = 0x0000, /* bit 4 put pll in suspend */ .enable_mask = 0x100000, .disable_mask = 0x100010, } }; struct rk_usb2phy_regs rk3568_regs = { .clk_ctl = { .offset = 0x0008, .enable_mask = 0x100000, /* bit 4 put pll in suspend */ .disable_mask = 0x100010, } }; static struct ofw_compat_data compat_data[] = { { "rockchip,rk3399-usb2phy", (uintptr_t)&rk3399_regs }, { "rockchip,rk3568-usb2phy", (uintptr_t)&rk3568_regs }, { NULL, 0 } }; struct rk_usb2phy_softc { device_t dev; struct syscon *grf; regulator_t phy_supply; clk_t clk; int mode; }; /* Phy class and methods. 
*/ static int rk_usb2phy_enable(struct phynode *phynode, bool enable); static int rk_usb2phy_get_mode(struct phynode *phy, int *mode); static int rk_usb2phy_set_mode(struct phynode *phy, int mode); static phynode_method_t rk_usb2phy_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, rk_usb2phy_enable), PHYNODEMETHOD(phynode_usb_get_mode, rk_usb2phy_get_mode), PHYNODEMETHOD(phynode_usb_set_mode, rk_usb2phy_set_mode), PHYNODEMETHOD_END }; DEFINE_CLASS_1(rk_usb2phy_phynode, rk_usb2phy_phynode_class, rk_usb2phy_phynode_methods, sizeof(struct phynode_usb_sc), phynode_usb_class); enum RK_USBPHY { RK_USBPHY_HOST = 0, RK_USBPHY_OTG, }; static int rk_usb2phy_enable(struct phynode *phynode, bool enable) { struct rk_usb2phy_softc *sc; device_t dev; intptr_t phy; int error; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK_USBPHY_HOST) return (ERANGE); if (sc->phy_supply) { if (enable) error = regulator_enable(sc->phy_supply); else error = regulator_disable(sc->phy_supply); if (error != 0) { device_printf(dev, "Cannot %sable the regulator\n", enable ? 
"En" : "Dis"); goto fail; } } return (0); fail: return (ENXIO); } static int rk_usb2phy_get_mode(struct phynode *phynode, int *mode) { struct rk_usb2phy_softc *sc; intptr_t phy; device_t dev; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK_USBPHY_HOST) return (ERANGE); *mode = sc->mode; return (0); } static int rk_usb2phy_set_mode(struct phynode *phynode, int mode) { struct rk_usb2phy_softc *sc; intptr_t phy; device_t dev; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != RK_USBPHY_HOST) return (ERANGE); sc->mode = mode; return (0); } /* Clock class and method */ struct rk_usb2phy_clk_sc { device_t clkdev; struct syscon *grf; struct rk_usb2phy_regs *regs; }; static int rk_usb2phy_clk_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int rk_usb2phy_clk_set_gate(struct clknode *clk, bool enable) { struct rk_usb2phy_clk_sc *sc; sc = clknode_get_softc(clk); if (enable) SYSCON_WRITE_4(sc->grf, sc->regs->clk_ctl.offset, sc->regs->clk_ctl.enable_mask); else SYSCON_WRITE_4(sc->grf, sc->regs->clk_ctl.offset, sc->regs->clk_ctl.disable_mask); return (0); } static int rk_usb2phy_clk_recalc(struct clknode *clk, uint64_t *freq) { *freq = 480000000; return (0); } static clknode_method_t rk_usb2phy_clk_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_usb2phy_clk_init), CLKNODEMETHOD(clknode_set_gate, rk_usb2phy_clk_set_gate), CLKNODEMETHOD(clknode_recalc_freq, rk_usb2phy_clk_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_usb2phy_clk_clknode, rk_usb2phy_clk_clknode_class, rk_usb2phy_clk_clknode_methods, sizeof(struct rk_usb2phy_clk_sc), clknode_class); static int rk_usb2phy_clk_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk) { if (ncells != 0) return (ERANGE); *clk = clknode_find_by_id(clkdom, 0); if (*clk == NULL) return (ENXIO); return (0); } static int 
rk_usb2phy_export_clock(struct rk_usb2phy_softc *devsc) { struct clknode_init_def def; struct rk_usb2phy_clk_sc *sc; const char **clknames; struct clkdom *clkdom; struct clknode *clk; clk_t clk_parent; phandle_t node; phandle_t regs[2]; int i, nclocks, ncells, error; node = ofw_bus_get_node(devsc->dev); error = ofw_bus_parse_xref_list_get_length(node, "clocks", "#clock-cells", &ncells); if (error != 0 || ncells != 1) { device_printf(devsc->dev, "couldn't find parent clock\n"); return (ENXIO); } nclocks = ofw_bus_string_list_to_array(node, "clock-output-names", &clknames); if (nclocks != 1) return (ENXIO); clkdom = clkdom_create(devsc->dev); clkdom_set_ofw_mapper(clkdom, rk_usb2phy_clk_ofw_map); memset(&def, 0, sizeof(def)); def.id = 0; def.name = clknames[0]; def.parent_names = malloc(sizeof(char *) * ncells, M_OFWPROP, M_WAITOK); for (i = 0; i < ncells; i++) { error = clk_get_by_ofw_index(devsc->dev, 0, i, &clk_parent); if (error != 0) { device_printf(devsc->dev, "cannot get clock %d\n", error); return (ENXIO); } def.parent_names[i] = clk_get_name(clk_parent); clk_release(clk_parent); } def.parent_cnt = ncells; clk = clknode_create(clkdom, &rk_usb2phy_clk_clknode_class, &def); if (clk == NULL) { device_printf(devsc->dev, "cannot create clknode\n"); return (ENXIO); } sc = clknode_get_softc(clk); sc->clkdev = device_get_parent(devsc->dev); sc->grf = devsc->grf; sc->regs = (struct rk_usb2phy_regs *)ofw_bus_search_compatible(devsc->dev, compat_data)->ocd_data; if (sc->regs->clk_ctl.offset == 0) { OF_getencprop(node, "reg", regs, sizeof(regs)); sc->regs->clk_ctl.offset = regs[0]; } clknode_register(clkdom, clk); if (clkdom_finit(clkdom) != 0) { device_printf(devsc->dev, "cannot finalize clkdom initialization\n"); return (ENXIO); } if (bootverbose) clkdom_dump(clkdom); return (0); } static int rk_usb2phy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); 
device_set_desc(dev, "Rockchip USB2PHY"); return (BUS_PROBE_DEFAULT); } static int rk_usb2phy_attach(device_t dev) { struct rk_usb2phy_softc *sc; struct phynode_init_def phy_init; struct phynode *phynode; phandle_t node, host; int err; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (OF_hasprop(node, "rockchip,usbgrf")) { if (syscon_get_by_ofw_property(dev, node, "rockchip,usbgrf", &sc->grf)) { device_printf(dev, "Cannot get syscon handle\n"); return (ENXIO); } } else { if (syscon_get_handle_default(dev, &sc->grf)) { device_printf(dev, "Cannot get syscon handle\n"); return (ENXIO); } } if (clk_get_by_ofw_name(dev, 0, "phyclk", &sc->clk) != 0) { device_printf(dev, "Cannot get clock\n"); return (ENXIO); } err = clk_enable(sc->clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk)); return (ENXIO); } err = rk_usb2phy_export_clock(sc); if (err != 0) return (err); /* Only host is supported right now */ host = ofw_bus_find_child(node, "host-port"); if (host == 0) { device_printf(dev, "Cannot find host-port child node\n"); return (ENXIO); } if (!ofw_bus_node_status_okay(host)) { device_printf(dev, "host-port isn't okay\n"); return (0); } regulator_get_by_ofw_property(dev, host, "phy-supply", &sc->phy_supply); phy_init.id = RK_USBPHY_HOST; phy_init.ofw_node = host; phynode = phynode_create(dev, &rk_usb2phy_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "failed to create host USB2PHY\n"); return (ENXIO); } if (phynode_register(phynode) == NULL) { device_printf(dev, "failed to register host USB2PHY\n"); return (ENXIO); } OF_device_register_xref(OF_xref_from_node(host), dev); return (0); } static device_method_t rk_usb2phy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_usb2phy_probe), DEVMETHOD(device_attach, rk_usb2phy_attach), DEVMETHOD_END }; static driver_t rk_usb2phy_driver = { "rk_usb2phy", rk_usb2phy_methods, sizeof(struct rk_usb2phy_softc) }; 
EARLY_DRIVER_MODULE(rk_usb2phy, simplebus, rk_usb2phy_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(rk_usb2phy, 1); diff --git a/sys/arm64/rockchip/rk_usbphy.c b/sys/arm64/rockchip/rk_usbphy.c index 4434ac22751d..6f9001470df8 100644 --- a/sys/arm64/rockchip/rk_usbphy.c +++ b/sys/arm64/rockchip/rk_usbphy.c @@ -1,300 +1,300 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include "phynode_if.h" #include "phynode_usb_if.h" #include "syscon_if.h" /* Phy registers */ #define UOC_CON0 0x00 #define UOC_CON0_SIDDQ (1 << 13) #define UOC_CON0_DISABLE (1 << 4) #define UOC_CON0_COMMON_ON_N (1 << 0) #define UOC_CON2 0x08 #define UOC_CON2_SOFT_CON_SEL (1 << 2) #define UOC_CON3 0x0c #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-usb-phy", 1}, {NULL, 0}, }; struct rk_usbphy_softc { device_t dev; }; struct rk_phynode_sc { struct phynode_usb_sc usb_sc; uint32_t base; int mode; clk_t clk; hwreset_t hwreset; regulator_t supply_vbus; struct syscon *syscon; }; static int rk_phynode_phy_enable(struct phynode *phy, bool enable) { struct rk_phynode_sc *sc; int rv; sc = phynode_get_softc(phy); rv = SYSCON_MODIFY_4(sc->syscon, sc->base + UOC_CON0, UOC_CON0_SIDDQ << 16 | UOC_CON0_SIDDQ, enable ? 0 : UOC_CON0_SIDDQ); return (rv); } static int rk_phynode_get_mode(struct phynode *phynode, int *mode) { struct rk_phynode_sc *sc; sc = phynode_get_softc(phynode); *mode = sc->mode; return (0); } static int rk_phynode_set_mode(struct phynode *phynode, int mode) { struct rk_phynode_sc *sc; sc = phynode_get_softc(phynode); sc->mode = mode; return (0); } /* Phy controller class and methods. 
*/ static phynode_method_t rk_phynode_methods[] = { PHYNODEUSBMETHOD(phynode_enable, rk_phynode_phy_enable), PHYNODEMETHOD(phynode_usb_get_mode, rk_phynode_get_mode), PHYNODEMETHOD(phynode_usb_set_mode, rk_phynode_set_mode), PHYNODEUSBMETHOD_END }; DEFINE_CLASS_1(rk_phynode, rk_phynode_class, rk_phynode_methods, sizeof(struct rk_phynode_sc), phynode_usb_class); static int rk_usbphy_init_phy(struct rk_usbphy_softc *sc, phandle_t node) { struct phynode *phynode; struct phynode_init_def phy_init; struct rk_phynode_sc *phy_sc; int rv; uint32_t base; clk_t clk; hwreset_t hwreset; regulator_t supply_vbus; struct syscon *syscon; clk = NULL; hwreset = NULL; supply_vbus = NULL; rv = OF_getencprop(node, "reg", &base, sizeof(base)); if (rv <= 0) { device_printf(sc->dev, "cannot get 'reg' property.\n"); goto fail; } /* FDT resources. All are optional. */ rv = clk_get_by_ofw_name(sc->dev, node, "phyclk", &clk); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev, "cannot get 'phyclk' clock.\n"); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, node, "phy-reset", &hwreset); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev, "Cannot get 'phy-reset' reset\n"); goto fail; } rv = regulator_get_by_ofw_property(sc->dev, node, "vbus-supply", &supply_vbus); if (rv != 0 && rv != ENOENT) { device_printf(sc->dev, "Cannot get 'vbus' regulator.\n"); goto fail; } rv = SYSCON_GET_HANDLE(sc->dev, &syscon); if (rv != 0) { device_printf(sc->dev, "Cannot get parent syscon\n"); goto fail; } /* Init HW resources */ if (hwreset != NULL) { rv = hwreset_assert(hwreset); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); goto fail; } } if (clk != NULL) { rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'phyclk' clock.\n"); goto fail; } } if (hwreset != NULL) { rv = hwreset_deassert(hwreset); if (rv != 0) { device_printf(sc->dev, "Cannot deassert reset\n"); goto fail; } } /* Create and register phy. 
*/ bzero(&phy_init, sizeof(phy_init)); phy_init.id = 1; phy_init.ofw_node = node; phynode = phynode_create(sc->dev, &rk_phynode_class, &phy_init); if (phynode == NULL) { device_printf(sc->dev, "Cannot create phy.\n"); return (ENXIO); } phy_sc = phynode_get_softc(phynode); phy_sc->base = base; phy_sc->clk = clk; phy_sc->hwreset = hwreset; phy_sc->supply_vbus = supply_vbus; phy_sc->syscon = syscon; if (phynode_register(phynode) == NULL) { device_printf(sc->dev, "Cannot register phy.\n"); return (ENXIO); } /* XXX It breaks boot */ /* rk_phynode_phy_enable(phynode, 1); */ return (0); fail: if (supply_vbus != NULL) regulator_release(supply_vbus); if (clk != NULL) clk_release(clk); if (hwreset != NULL) hwreset_release(hwreset); return (ENXIO); } static int rk_usbphy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip USB Phy"); return (BUS_PROBE_DEFAULT); } static int rk_usbphy_attach(device_t dev) { struct rk_usbphy_softc *sc; phandle_t node, child; int rv; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); /* Attach child devices */ for (child = OF_child(node); child > 0; child = OF_peer(child)) { rv = rk_usbphy_init_phy(sc, child); if (rv != 0) goto fail; } return (bus_generic_attach(dev)); fail: return (ENXIO); } static int rk_usbphy_detach(device_t dev) { return (0); } static device_method_t rk_usbphy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_usbphy_probe), DEVMETHOD(device_attach, rk_usbphy_attach), DEVMETHOD(device_detach, rk_usbphy_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(rk_usbphy, rk_usbphy_driver, rk_usbphy_methods, sizeof(struct rk_usbphy_softc)); EARLY_DRIVER_MODULE(rk_usbphy, simplebus, rk_usbphy_driver, NULL, NULL, BUS_PASS_TIMER + BUS_PASS_ORDER_LAST); diff --git a/sys/conf/files b/sys/conf/files index cdc6f62edbc2..8b28d6428584 100644 --- a/sys/conf/files +++ 
b/sys/conf/files @@ -1,5240 +1,5240 @@ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # acpi_quirks.h optional acpi \ dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ no-obj no-implicit-rule before-depend \ clean "acpi_quirks.h" bhnd_nvram_map.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map.h" bhnd_nvram_map_data.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map_data.h" fdt_static_dtb.h optional fdt fdt_dtb_static \ compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \ dependency "${FDT_DTS_FILE:T:R}.dtb" \ no-obj no-implicit-rule before-depend \ clean "fdt_static_dtb.h" feeder_eq_gen.h optional sound \ dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_eq_gen.h" feeder_rate_gen.h optional sound \ dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_rate_gen.h" font.h optional sc_dflt_font \ compile-with "uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char 
dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" snd_fxdiv_gen.h optional sound \ dependency "$S/tools/sound/snd_fxdiv_gen.awk" \ compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \ no-obj no-implicit-rule before-depend \ clean "snd_fxdiv_gen.h" miidevs.h optional miibus | mii \ dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ no-obj no-implicit-rule before-depend \ clean "miidevs.h" kbdmuxmap.h optional kbdmux_dflt_keymap \ compile-with "${KEYMAP} -L ${KBDMUX_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "kbdmuxmap.h" teken_state.h optional sc | vt \ dependency "$S/teken/gensequences $S/teken/sequences" \ compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \ no-obj no-implicit-rule before-depend \ clean "teken_state.h" ukbdmap.h optional ukbd_dflt_keymap \ compile-with "${KEYMAP} -L ${UKBD_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" usbdevs.h optional usb | hid \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \ no-obj no-implicit-rule before-depend \ clean "usbdevs.h" usbdevs_data.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \ no-obj no-implicit-rule before-depend \ clean "usbdevs_data.h" sdiodevs.h optional mmccam \ dependency 
"$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \ compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -h" \ no-obj no-implicit-rule before-depend \ clean "sdiodevs.h" sdiodevs_data.h optional mmccam \ dependency "$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \ compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -d" \ no-obj no-implicit-rule before-depend \ clean "sdiodevs_data.h" cam/cam.c optional scbus cam/cam_compat.c optional scbus cam/cam_iosched.c optional scbus cam/cam_periph.c optional scbus cam/cam_queue.c optional scbus cam/cam_sim.c optional scbus cam/cam_xpt.c optional scbus cam/ata/ata_all.c optional scbus cam/ata/ata_xpt.c optional scbus cam/ata/ata_pmp.c optional scbus cam/nvme/nvme_all.c optional scbus cam/nvme/nvme_da.c optional nda | da cam/nvme/nvme_xpt.c optional scbus cam/scsi/scsi_xpt.c optional scbus cam/scsi/scsi_all.c optional scbus cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch cam/ata/ata_da.c optional ada | da cam/ctl/ctl.c optional ctl cam/ctl/ctl_backend.c optional ctl cam/ctl/ctl_backend_block.c optional ctl cam/ctl/ctl_backend_ramdisk.c optional ctl cam/ctl/ctl_cmd_table.c optional ctl cam/ctl/ctl_frontend.c optional ctl cam/ctl/ctl_frontend_cam_sim.c optional ctl cam/ctl/ctl_frontend_ioctl.c optional ctl cam/ctl/ctl_frontend_iscsi.c optional ctl cfiscsi cam/ctl/ctl_ha.c optional ctl cam/ctl/ctl_scsi_all.c optional ctl cam/ctl/ctl_tpc.c optional ctl cam/ctl/ctl_tpc_local.c optional ctl cam/ctl/ctl_error.c optional ctl cam/ctl/ctl_util.c optional ctl cam/ctl/scsi_ctl.c optional ctl cam/mmc/mmc_xpt.c optional scbus mmccam cam/mmc/mmc_sim.c optional scbus mmccam cam/mmc/mmc_sim_if.m optional scbus mmccam cam/mmc/mmc_da.c optional scbus mmccam da cam/scsi/scsi_da.c optional da cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_pt.c optional pt cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_enc.c optional ses cam/scsi/scsi_enc_ses.c optional ses cam/scsi/scsi_enc_safte.c optional ses 
cam/scsi/scsi_sg.c optional sg cam/scsi/scsi_targ_bh.c optional targbh cam/scsi/scsi_target.c optional targ cam/scsi/smp_all.c optional scbus # shared between zfs and dtrace cddl/compat/opensolaris/kern/opensolaris.c optional dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}" contrib/openzfs/module/os/freebsd/spl/spl_misc.c optional zfs | dtrace compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_cmn_err.c optional zfs | dtrace compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_taskq.c optional zfs | dtrace compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_kmem.c optional zfs | dtrace compile-with "${ZFS_C}" #zfs solaris portability layer contrib/openzfs/module/os/freebsd/spl/acl_common.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/callb.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/list.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_acl.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_dtrace.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_kstat.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_policy.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_procfs_list.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_string.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_sunddi.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_uio.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_vfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_vm.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/os/freebsd/spl/spl_zlib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/spl/spl_zone.c optional zfs compile-with "${ZFS_C}" # zfs specific #zfs avl contrib/openzfs/module/avl/avl.c optional zfs compile-with "${ZFS_C}" # zfs lua support contrib/openzfs/module/lua/lapi.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lauxlib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lbaselib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lcode.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lcompat.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lcorolib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lctype.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/ldebug.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/ldo.c optional zfs compile-with "${ZFS_C} ${NO_WINFINITE_RECURSION}" contrib/openzfs/module/lua/lfunc.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lgc.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/llex.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lmem.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lobject.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lopcodes.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lparser.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lstate.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lstring.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lstrlib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/ltable.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/ltablib.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/ltm.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/lua/lvm.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/lua/lzio.c optional zfs compile-with "${ZFS_C}" # zfs nvpair support contrib/openzfs/module/nvpair/fnvpair.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/nvpair/nvpair.c optional zfs compile-with "${ZFS_RPC_C} ${NO_WSTRINGOP_OVERREAD}" contrib/openzfs/module/nvpair/nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/nvpair/nvpair_alloc_spl.c optional zfs compile-with "${ZFS_C}" #zfs platform compatibility code contrib/openzfs/module/os/freebsd/zfs/abd_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/arc_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/crypto_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/dmu_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/event_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/hkdf.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/kmod_core.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/spa_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c optional zfs compile-with "${ZFS_C} -include $S/modules/zfs/zfs_config.h" contrib/openzfs/module/os/freebsd/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/vdev_label_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/os/freebsd/zfs/zvol_os.c optional zfs compile-with "${ZFS_C}" #zfs unicode support contrib/openzfs/module/unicode/uconv.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}" #zfs checksums / zcommon contrib/openzfs/module/zcommon/cityhash.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfeature_common.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_comutil.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_deleg.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_superscalar.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_superscalar4.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_namecheck.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_prop.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zpool_prop.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zprop_common.c optional zfs compile-with "${ZFS_C}" # zfs edon-r hash support contrib/openzfs/module/icp/algs/edonr/edonr.c optional zfs compile-with "${ZFS_C}" # zfs blake3 hash support 
contrib/openzfs/module/icp/algs/blake3/blake3.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/icp/algs/blake3/blake3_generic.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/icp/algs/blake3/blake3_impl.c optional zfs compile-with "${ZFS_C}" # zfs sha2 hash support contrib/openzfs/module/icp/algs/sha2/sha2_generic.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/icp/algs/sha2/sha256_impl.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/icp/algs/sha2/sha512_impl.c optional zfs compile-with "${ZFS_C}" #zfs core common code contrib/openzfs/module/zfs/abd.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/aggsum.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/arc.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/blake3_zfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/blkptr.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/bplist.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/bpobj.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/bptree.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/brt.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/btree.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/bqueue.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dbuf.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dbuf_stats.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dataset_kstats.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/ddt.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_recv.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_redact.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dnode.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" \ warning "kernel contains CDDL licensed ZFS filesystem" contrib/openzfs/module/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_crypt.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/edonr_zfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/fm.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/gzip.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/lzjb.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/zfs/lz4.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/lz4_zfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/metaslab.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/mmp.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/multilist.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/objlist.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/pathname.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/range_tree.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/refcount.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/sa.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/sha2_zfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_checkpoint.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_config.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_history.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_log_spacemap.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/spa_stats.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/space_map.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/txg.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/uberblock.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/unique.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev.c optional zfs 
compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_draid.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_draid_rand.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_indirect.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_indirect_births.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_indirect_mapping.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_initialize.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_rebuild.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_removal.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_trim.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zap.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zcp.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zcp_get.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zcp_global.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zcp_iter.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zcp_set.c optional zfs compile-with "${ZFS_C}" 
contrib/openzfs/module/zfs/zcp_synctask.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfeature.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_chksum.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" contrib/openzfs/module/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_impl.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_quota.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_ratelimit.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zstd/zfs_zstd.c optional zfs zstdio compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zil.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zio.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zle.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zrlock.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zthr.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/zvol.c optional zfs compile-with "${ZFS_C}" # 
dtrace specific cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \ warning "kernel contains CDDL licensed DTRACE" cddl/contrib/opensolaris/uts/common/dtrace/dtrace_xoroshiro128_plus.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}" cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}" cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}" cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}" cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}" fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}" compat/freebsd32/freebsd32_abort2.c optional compat_freebsd32 compat/freebsd32/freebsd32_capability.c optional compat_freebsd32 compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32 compat/freebsd32/freebsd32_misc.c optional compat_freebsd32 compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32 compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32 contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" 
contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/dev/acpica/common/ahids.c optional acpi acpi_debug contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug 
contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi contrib/dev/acpica/components/dispatcher/dspkginit.c optional acpi contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi contrib/dev/acpica/components/dispatcher/dswload.c optional acpi contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi contrib/dev/acpica/components/events/evevent.c optional acpi contrib/dev/acpica/components/events/evglock.c optional acpi contrib/dev/acpica/components/events/evgpe.c optional acpi contrib/dev/acpica/components/events/evgpeblk.c optional acpi contrib/dev/acpica/components/events/evgpeinit.c optional acpi contrib/dev/acpica/components/events/evgpeutil.c optional acpi contrib/dev/acpica/components/events/evhandler.c optional acpi contrib/dev/acpica/components/events/evmisc.c optional acpi contrib/dev/acpica/components/events/evregion.c optional acpi contrib/dev/acpica/components/events/evrgnini.c optional acpi contrib/dev/acpica/components/events/evsci.c optional acpi 
contrib/dev/acpica/components/events/evxface.c optional acpi contrib/dev/acpica/components/events/evxfevnt.c optional acpi contrib/dev/acpica/components/events/evxfgpe.c optional acpi contrib/dev/acpica/components/events/evxfregn.c optional acpi contrib/dev/acpica/components/executer/exconcat.c optional acpi contrib/dev/acpica/components/executer/exconfig.c optional acpi contrib/dev/acpica/components/executer/exconvrt.c optional acpi contrib/dev/acpica/components/executer/excreate.c optional acpi contrib/dev/acpica/components/executer/exdebug.c optional acpi contrib/dev/acpica/components/executer/exdump.c optional acpi contrib/dev/acpica/components/executer/exfield.c optional acpi contrib/dev/acpica/components/executer/exfldio.c optional acpi contrib/dev/acpica/components/executer/exmisc.c optional acpi contrib/dev/acpica/components/executer/exmutex.c optional acpi contrib/dev/acpica/components/executer/exnames.c optional acpi contrib/dev/acpica/components/executer/exoparg1.c optional acpi contrib/dev/acpica/components/executer/exoparg2.c optional acpi contrib/dev/acpica/components/executer/exoparg3.c optional acpi contrib/dev/acpica/components/executer/exoparg6.c optional acpi contrib/dev/acpica/components/executer/exprep.c optional acpi contrib/dev/acpica/components/executer/exregion.c optional acpi contrib/dev/acpica/components/executer/exresnte.c optional acpi contrib/dev/acpica/components/executer/exresolv.c optional acpi contrib/dev/acpica/components/executer/exresop.c optional acpi contrib/dev/acpica/components/executer/exserial.c optional acpi contrib/dev/acpica/components/executer/exstore.c optional acpi contrib/dev/acpica/components/executer/exstoren.c optional acpi contrib/dev/acpica/components/executer/exstorob.c optional acpi contrib/dev/acpica/components/executer/exsystem.c optional acpi contrib/dev/acpica/components/executer/extrace.c optional acpi contrib/dev/acpica/components/executer/exutils.c optional acpi 
contrib/dev/acpica/components/hardware/hwacpi.c optional acpi contrib/dev/acpica/components/hardware/hwesleep.c optional acpi contrib/dev/acpica/components/hardware/hwgpe.c optional acpi contrib/dev/acpica/components/hardware/hwpci.c optional acpi contrib/dev/acpica/components/hardware/hwregs.c optional acpi contrib/dev/acpica/components/hardware/hwsleep.c optional acpi contrib/dev/acpica/components/hardware/hwtimer.c optional acpi contrib/dev/acpica/components/hardware/hwvalid.c optional acpi contrib/dev/acpica/components/hardware/hwxface.c optional acpi contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi contrib/dev/acpica/components/namespace/nsaccess.c optional acpi \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" contrib/dev/acpica/components/namespace/nsalloc.c optional acpi contrib/dev/acpica/components/namespace/nsarguments.c optional acpi contrib/dev/acpica/components/namespace/nsconvert.c optional acpi contrib/dev/acpica/components/namespace/nsdump.c optional acpi contrib/dev/acpica/components/namespace/nseval.c optional acpi contrib/dev/acpica/components/namespace/nsinit.c optional acpi contrib/dev/acpica/components/namespace/nsload.c optional acpi contrib/dev/acpica/components/namespace/nsnames.c optional acpi contrib/dev/acpica/components/namespace/nsobject.c optional acpi contrib/dev/acpica/components/namespace/nsparse.c optional acpi contrib/dev/acpica/components/namespace/nspredef.c optional acpi contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi contrib/dev/acpica/components/namespace/nsrepair.c optional acpi contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi contrib/dev/acpica/components/namespace/nssearch.c optional acpi contrib/dev/acpica/components/namespace/nsutils.c optional acpi contrib/dev/acpica/components/namespace/nswalk.c optional acpi contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi contrib/dev/acpica/components/namespace/nsxfname.c optional acpi 
contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi contrib/dev/acpica/components/parser/psargs.c optional acpi contrib/dev/acpica/components/parser/psloop.c optional acpi contrib/dev/acpica/components/parser/psobject.c optional acpi contrib/dev/acpica/components/parser/psopcode.c optional acpi contrib/dev/acpica/components/parser/psopinfo.c optional acpi contrib/dev/acpica/components/parser/psparse.c optional acpi contrib/dev/acpica/components/parser/psscope.c optional acpi contrib/dev/acpica/components/parser/pstree.c optional acpi contrib/dev/acpica/components/parser/psutils.c optional acpi contrib/dev/acpica/components/parser/pswalk.c optional acpi contrib/dev/acpica/components/parser/psxface.c optional acpi contrib/dev/acpica/components/resources/rsaddr.c optional acpi contrib/dev/acpica/components/resources/rscalc.c optional acpi contrib/dev/acpica/components/resources/rscreate.c optional acpi contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi contrib/dev/acpica/components/resources/rsinfo.c optional acpi contrib/dev/acpica/components/resources/rsio.c optional acpi contrib/dev/acpica/components/resources/rsirq.c optional acpi contrib/dev/acpica/components/resources/rslist.c optional acpi contrib/dev/acpica/components/resources/rsmemory.c optional acpi contrib/dev/acpica/components/resources/rsmisc.c optional acpi contrib/dev/acpica/components/resources/rsserial.c optional acpi contrib/dev/acpica/components/resources/rsutils.c optional acpi contrib/dev/acpica/components/resources/rsxface.c optional acpi contrib/dev/acpica/components/tables/tbdata.c optional acpi contrib/dev/acpica/components/tables/tbfadt.c optional acpi contrib/dev/acpica/components/tables/tbfind.c optional acpi contrib/dev/acpica/components/tables/tbinstal.c optional acpi contrib/dev/acpica/components/tables/tbprint.c optional acpi contrib/dev/acpica/components/tables/tbutils.c optional acpi 
contrib/dev/acpica/components/tables/tbxface.c optional acpi contrib/dev/acpica/components/tables/tbxfload.c optional acpi contrib/dev/acpica/components/tables/tbxfroot.c optional acpi contrib/dev/acpica/components/utilities/utaddress.c optional acpi contrib/dev/acpica/components/utilities/utalloc.c optional acpi contrib/dev/acpica/components/utilities/utascii.c optional acpi contrib/dev/acpica/components/utilities/utbuffer.c optional acpi contrib/dev/acpica/components/utilities/utcache.c optional acpi contrib/dev/acpica/components/utilities/utcksum.c optional acpi contrib/dev/acpica/components/utilities/utcopy.c optional acpi contrib/dev/acpica/components/utilities/utdebug.c optional acpi contrib/dev/acpica/components/utilities/utdecode.c optional acpi contrib/dev/acpica/components/utilities/utdelete.c optional acpi contrib/dev/acpica/components/utilities/uterror.c optional acpi contrib/dev/acpica/components/utilities/uteval.c optional acpi contrib/dev/acpica/components/utilities/utexcep.c optional acpi contrib/dev/acpica/components/utilities/utglobal.c optional acpi contrib/dev/acpica/components/utilities/uthex.c optional acpi contrib/dev/acpica/components/utilities/utids.c optional acpi contrib/dev/acpica/components/utilities/utinit.c optional acpi contrib/dev/acpica/components/utilities/utlock.c optional acpi contrib/dev/acpica/components/utilities/utmath.c optional acpi contrib/dev/acpica/components/utilities/utmisc.c optional acpi contrib/dev/acpica/components/utilities/utmutex.c optional acpi contrib/dev/acpica/components/utilities/utnonansi.c optional acpi contrib/dev/acpica/components/utilities/utobject.c optional acpi contrib/dev/acpica/components/utilities/utosi.c optional acpi contrib/dev/acpica/components/utilities/utownerid.c optional acpi contrib/dev/acpica/components/utilities/utpredef.c optional acpi contrib/dev/acpica/components/utilities/utresdecode.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utresrc.c optional acpi 
contrib/dev/acpica/components/utilities/utstate.c optional acpi contrib/dev/acpica/components/utilities/utstring.c optional acpi contrib/dev/acpica/components/utilities/utstrsuppt.c optional acpi contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utxface.c optional acpi contrib/dev/acpica/components/utilities/utxferror.c optional acpi contrib/dev/acpica/components/utilities/utxfinit.c optional acpi contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug netpfil/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused 
-I$S/netpfil/ipfilter ${NO_WTAUTOLOGICAL_POINTER_COMPARE}" netpfil/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_nat6.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_rules.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_scan.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter" netpfil/ipfilter/netinet/radix_ipf.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/netpfil/ipfilter" contrib/libfdt/fdt.c optional fdt contrib/libfdt/fdt_ro.c optional fdt contrib/libfdt/fdt_rw.c optional fdt contrib/libfdt/fdt_strerror.c optional fdt contrib/libfdt/fdt_sw.c optional fdt contrib/libfdt/fdt_wip.c optional fdt contrib/libnv/cnvlist.c standard contrib/libnv/dnvlist.c standard contrib/libnv/nvlist.c standard contrib/libnv/bsd_nvpair.c standard # xz dev/xz/xz_mod.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc32.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc64.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 
-I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c optional xz \ compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" # Zstd contrib/zstd/lib/freebsd/zstd_kmalloc.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/common/zstd_common.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/common/fse_decompress.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/common/entropy_common.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/common/error_private.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/common/xxhash.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_compress.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_compress_literals.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_compress_sequences.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_compress_superblock.c optional zstdio compile-with "${ZSTD_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" contrib/zstd/lib/compress/fse_compress.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/hist.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/huf_compress.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_double_fast.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_fast.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_lazy.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_ldm.c optional zstdio compile-with ${ZSTD_C} 
contrib/zstd/lib/compress/zstd_opt.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/decompress/zstd_ddict.c optional zstdio compile-with ${ZSTD_C} contrib/zstd/lib/decompress/zstd_decompress.c optional zstdio compile-with ${ZSTD_C} # See comment in sys/conf/kern.pre.mk contrib/zstd/lib/decompress/zstd_decompress_block.c optional zstdio \ compile-with "${ZSTD_C} ${ZSTD_DECOMPRESS_BLOCK_FLAGS}" contrib/zstd/lib/decompress/huf_decompress.c optional zstdio compile-with "${ZSTD_C} ${NO_WBITWISE_INSTEAD_OF_LOGICAL}" # Blake 2 contrib/libb2/blake2b-ref.c optional crypto | !random_loadable random_fenestrasx \ compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function" contrib/libb2/blake2s-ref.c optional crypto \ compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function" crypto/blake2/blake2-sw.c optional crypto \ compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual" crypto/camellia/camellia.c optional crypto crypto/camellia/camellia-api.c optional crypto crypto/chacha20/chacha.c standard crypto/chacha20/chacha-sw.c optional crypto crypto/chacha20_poly1305.c optional crypto crypto/curve25519.c optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium" crypto/des/des_ecb.c optional netsmb crypto/des/des_setkey.c optional netsmb crypto/openssl/ossl.c optional ossl crypto/openssl/ossl_aes.c optional ossl crypto/openssl/ossl_chacha20.c optional ossl crypto/openssl/ossl_poly1305.c optional ossl crypto/openssl/ossl_sha1.c optional ossl crypto/openssl/ossl_sha256.c optional ossl crypto/openssl/ossl_sha512.c optional ossl crypto/rc4/rc4.c optional netgraph_mppc_encryption crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | geom_bde | \ !random_loadable | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | !random_loadable crypto/rijndael/rijndael-api.c optional crypto | wlan_ccmp crypto/sha1.c optional carp | 
crypto | ether | \ netgraph_mppc_encryption | sctp crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | \ !random_loadable | sctp | zfs crypto/sha2/sha512c.c optional crypto | geom_bde | zfs crypto/skein/skein.c optional crypto | zfs crypto/skein/skein_block.c optional crypto | zfs crypto/siphash/siphash.c optional inet | inet6 | wg crypto/siphash/siphash_test.c optional inet | inet6 | wg ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_pci.c optional aac pci dev/aacraid/aacraid.c optional aacraid dev/aacraid/aacraid_cam.c optional aacraid scbus dev/aacraid/aacraid_debug.c optional aacraid dev/aacraid/aacraid_pci.c optional aacraid pci dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_hp.c optional acpi_hp acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpi_support/atk0110.c optional aibs acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi 
dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_apei.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_ged.c optional acpi_ged acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_container.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/ae/if_ae.c optional ae pci dev/age/if_age.c optional age pci dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/ahci/ahci.c optional ahci dev/ahci/ahciem.c optional ahci dev/ahci/ahci_pci.c optional ahci pci dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/ahd_pci.c optional ahd pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/aic7xxx/aic7xxx_reg_print.c optional 
ahc ahc_reg_pretty_print dev/al_eth/al_eth.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" dev/al_eth/al_init_eth_lm.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" dev/al_eth/al_init_eth_kr.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_iofic.c optional al_iofic \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_config.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_main.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/al_serdes.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" 
contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" dev/alc/if_alc.c optional alc pci dev/ale/if_ale.c optional ale pci dev/alpm/alpm.c optional alpm pci dev/altera/avgen/altera_avgen.c optional altera_avgen dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen dev/altera/msgdma/msgdma.c optional altera_msgdma xdma dev/altera/sdcard/altera_sdcard.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard dev/altera/softdma/softdma.c optional altera_softdma xdma fdt dev/altera/pio/pio.c optional altera_pio dev/altera/pio/pio_if.m optional altera_pio dev/amdpm/amdpm.c optional amdpm pci | nfpm pci dev/amdsmb/amdsmb.c optional amdsmb pci # dev/ata/ata_if.m optional ata | atacore dev/ata/ata-all.c optional ata | atacore dev/ata/ata-dma.c optional ata | atacore dev/ata/ata-lowlevel.c optional ata | atacore dev/ata/ata-sata.c optional ata | atacore dev/ata/ata-isa.c optional ata isa | ataisa dev/ata/ata-pci.c optional ata pci | atapci dev/ata/chipsets/ata-acard.c optional ata pci | ataacard dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs dev/ata/chipsets/ata-amd.c optional ata pci | ataamd dev/ata/chipsets/ata-ati.c optional ata pci | ataati dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix dev/ata/chipsets/ata-highpoint.c optional ata pci | 
atahighpoint dev/ata/chipsets/ata-intel.c optional ata pci | ataintel dev/ata/chipsets/ata-ite.c optional ata pci | ataite dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell dev/ata/chipsets/ata-micron.c optional ata pci | atamicron dev/ata/chipsets/ata-national.c optional ata pci | atanational dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia dev/ata/chipsets/ata-promise.c optional ata pci | atapromise dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati dev/ata/chipsets/ata-sis.c optional ata pci | atasis dev/ata/chipsets/ata-via.c optional ata pci | atavia # dev/ath/if_ath.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_alq.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_beacon.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_btcoex.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_btcoex_mci.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_debug.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_descdma.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_keycache.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_ioctl.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_led.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_lna_div.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_pci.c optional ath pci \ compile-with "${ATH_C}" dev/ath/if_ath_tx.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_tx_edma.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_tx_ht.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_tdma.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_sysctl.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_rx.c optional ath \ compile-with "${ATH_C}" dev/ath/if_ath_rx_edma.c optional ath \ compile-with "${ATH_C}" 
dev/ath/if_ath_spectral.c optional ath \ compile-with "${ATH_C}" dev/ath/ah_osdep.c optional ath \ compile-with "${ATH_C}" # dev/ath/ath_hal/ah.c optional ath \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_eeprom_v14.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_eeprom_v4k.c \ optional ath_hal | ath_ar9285 \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_eeprom_9287.c \ optional ath_hal | ath_ar9287 \ compile-with "${ATH_C}" dev/ath/ath_hal/ah_regdomain.c optional ath \ compile-with "${ATH_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW}" # ar5210 dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar5211 
dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar5212 dev/ath/ath_hal/ar5212/ar5212_ani.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_attach.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_beacon.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_eeprom.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_gpio.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | 
ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_interrupts.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_keycache.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_misc.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_phy.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_power.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_recv.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_reset.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_rfgain.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_xmit.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar5416 (depends on ar5212) dev/ath/ath_hal/ar5416/ar5416_ani.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_attach.c \ optional ath_hal | ath_ar5416 | 
ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_beacon.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_btcoex.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_eeprom.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_gpio.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_interrupts.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_keycache.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_misc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" 
dev/ath/ath_hal/ar5416/ar5416_phy.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_power.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_radar.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_recv.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_reset.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_spectral.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_xmit.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar9160 (depends on ar5416) dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar9280 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar9285 (depends on ar5416 and ar9280) dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} 
-I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar9287 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ar9300 contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}" contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" 
contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function" contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} 
-I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" # rf backends dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \ ath_ar9130 | ath_ar9160 | ath_ar9280 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \ compile-with "${ATH_C} -I$S/dev/ath/ath_hal" # ath rate control algorithms dev/ath/ath_rate/amrr/amrr.c optional 
ath_rate_amrr \ compile-with "${ATH_C}" dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${ATH_C}" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${ATH_C}" # ath DFS modules dev/ath/ath_dfs/null/dfs_null.c optional ath \ compile-with "${ATH_C}" # dev/backlight/backlight_if.m optional backlight | compat_linuxkpi dev/backlight/backlight.c optional backlight | compat_linuxkpi dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bhnd/bhnd.c optional bhnd dev/bhnd/bhnd_erom.c optional bhnd dev/bhnd/bhnd_erom_if.m optional bhnd dev/bhnd/bhnd_subr.c optional bhnd dev/bhnd/bhnd_bus_if.m optional bhnd dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_pci.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd dev/bhnd/bcma/bcma.c optional bcma bhnd dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb dev/bhnd/bcma/bcma_erom.c optional bcma bhnd dev/bhnd/bcma/bcma_subr.c optional bcma bhnd dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd dev/bhnd/cores/chipc/chipc.c optional bhnd dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi dev/bhnd/cores/chipc/chipc_gpio.c optional bhnd gpio dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus dev/bhnd/cores/chipc/chipc_subr.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_if.m optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_hostb_if.m optional 
bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2b.c optional bhnd_pcie2b bhnd pci dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd dev/bhnd/nvram/bhnd_sprom.c optional bhnd dev/bhnd/siba/siba.c optional siba bhnd dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb dev/bhnd/siba/siba_erom.c optional siba bhnd dev/bhnd/siba/siba_subr.c optional siba bhnd # dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci dev/bnxt/bnxt_mgmt.c optional bnxt iflib pci dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci dev/bnxt/bnxt_txrx.c optional 
bnxt iflib pci dev/bnxt/if_bnxt.c optional bnxt iflib pci dev/bwi/bwimac.c optional bwi dev/bwi/bwiphy.c optional bwi dev/bwi/bwirf.c optional bwi dev/bwi/if_bwi.c optional bwi dev/bwi/if_bwi_pci.c optional bwi pci dev/bwn/if_bwn.c optional bwn bhnd dev/bwn/if_bwn_pci.c optional bwn pci bhnd bhndb bhndb_pci dev/bwn/if_bwn_phy_common.c optional bwn bhnd dev/bwn/if_bwn_phy_g.c optional bwn bhnd dev/bwn/if_bwn_phy_lp.c optional bwn bhnd dev/bwn/if_bwn_phy_n.c optional bwn bhnd dev/bwn/if_bwn_util.c optional bwn bhnd dev/cadence/if_cgem.c optional cgem fdt dev/cardbus/card_if.m standard dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/cardbus/power_if.m standard dev/cas/if_cas.c optional cas dev/cfi/cfi_bus_fdt.c optional cfi fdt dev/cfi/cfi_bus_nexus.c optional cfi dev/cfi/cfi_core.c optional cfi dev/cfi/cfi_dev.c optional cfi dev/cfi/cfi_disk.c optional cfid dev/chromebook_platform/chromebook_platform.c optional chromebook_platform dev/ciss/ciss.c optional ciss +dev/clk/clk.c optional clk +dev/clk/clkdev_if.m optional clk +dev/clk/clknode_if.m optional clk +dev/clk/clk_bus.c optional clk fdt +dev/clk/clk_div.c optional clk +dev/clk/clk_fixed.c optional clk +dev/clk/clk_gate.c optional clk +dev/clk/clk_link.c optional clk +dev/clk/clk_mux.c optional clk dev/cpufreq/ichss.c optional cpufreq pci dev/cxgb/cxgb_main.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_sge.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mc5.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \ compile-with "${NORMAL_C} 
-I$S/dev/cxgb" dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/sys/uipc_mvec.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgbe/t4_clip.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_filter.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_if.m optional cxgbe pci dev/cxgbe/t4_iov.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_mp_ring.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_main.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_netmap.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sched.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sge.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_smt.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_l2t.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_tracer.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_vf.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4_hw.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/crypto/t6_kern_tls.c optional cxgbe pci kern_tls \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/crypto/t4_keyctx.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_common.c 
optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_flash_utils.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_lib.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_wtp.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz_api.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" t4fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "t4fw_cfg.c" t4fw_cfg.fwo optional cxgbe \ dependency "t4fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg.fwo" t4fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg.fw" t4fw_cfg_uwire.fwo optional cxgbe \ dependency "t4fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg_uwire.fwo" t4fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg_uwire.fw" t4fw.fwo optional cxgbe \ dependency "t4fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw.fwo" t4fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw-1.27.5.0.bin" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw.fw" t5fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw_cfg_uwire.fw:t5fw_cfg_uwire t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "t5fw_cfg.c" t5fw_cfg.fwo optional cxgbe \ dependency "t5fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ 
clean "t5fw_cfg.fwo" t5fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg.fw" t5fw_cfg_uwire.fwo optional cxgbe \ dependency "t5fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw_cfg_uwire.fwo" t5fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg_uwire.fw" t5fw.fwo optional cxgbe \ dependency "t5fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw.fwo" t5fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw-1.27.5.0.bin" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw.fw" t6fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw_cfg_uwire.fw:t6fw_cfg_uwire t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "t6fw_cfg.c" t6fw_cfg.fwo optional cxgbe \ dependency "t6fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg.fwo" t6fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg.fw" t6fw_cfg_uwire.fwo optional cxgbe \ dependency "t6fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg_uwire.fwo" t6fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg_uwire.fw" t6fw.fwo optional cxgbe \ dependency "t6fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw.fwo" t6fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw-1.27.5.0.bin" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw.fw" dev/cxgbe/crypto/t4_crypto.c optional 
ccr \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cyapa/cyapa.c optional cyapa iicbus dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/dialog/da9063/da9063_if.m optional da9063_pmic dev/dialog/da9063/da9063_iic.c optional da9063_pmic iicbus fdt dev/dialog/da9063/da9063_rtc.c optional da9063_rtc fdt dev/drm2/drm_agpsupport.c optional drm2 dev/drm2/drm_auth.c optional drm2 dev/drm2/drm_bufs.c optional drm2 dev/drm2/drm_buffer.c optional drm2 dev/drm2/drm_context.c optional drm2 dev/drm2/drm_crtc.c optional drm2 dev/drm2/drm_crtc_helper.c optional drm2 dev/drm2/drm_dma.c optional drm2 dev/drm2/drm_dp_helper.c optional drm2 dev/drm2/drm_dp_iic_helper.c optional drm2 dev/drm2/drm_drv.c optional drm2 dev/drm2/drm_edid.c optional drm2 dev/drm2/drm_fb_helper.c optional drm2 dev/drm2/drm_fops.c optional drm2 dev/drm2/drm_gem.c optional drm2 dev/drm2/drm_gem_names.c optional drm2 dev/drm2/drm_global.c optional drm2 dev/drm2/drm_hashtab.c optional drm2 dev/drm2/drm_ioctl.c optional drm2 dev/drm2/drm_irq.c optional drm2 dev/drm2/drm_linux_list_sort.c optional drm2 dev/drm2/drm_lock.c optional drm2 dev/drm2/drm_memory.c optional drm2 dev/drm2/drm_mm.c optional drm2 dev/drm2/drm_modes.c optional drm2 dev/drm2/drm_pci.c optional drm2 dev/drm2/drm_platform.c optional drm2 dev/drm2/drm_scatter.c optional drm2 dev/drm2/drm_stub.c optional drm2 dev/drm2/drm_sysctl.c optional drm2 dev/drm2/drm_vm.c optional drm2 dev/drm2/drm_os_freebsd.c optional drm2 dev/drm2/ttm/ttm_agp_backend.c optional drm2 dev/drm2/ttm/ttm_lock.c optional drm2 dev/drm2/ttm/ttm_object.c optional drm2 dev/drm2/ttm/ttm_tt.c optional drm2 dev/drm2/ttm/ttm_bo_util.c optional drm2 dev/drm2/ttm/ttm_bo.c optional drm2 dev/drm2/ttm/ttm_bo_manager.c optional drm2 dev/drm2/ttm/ttm_execbuf_util.c optional drm2 dev/drm2/ttm/ttm_memory.c optional drm2 
dev/drm2/ttm/ttm_page_alloc.c optional drm2 dev/drm2/ttm/ttm_bo_vm.c optional drm2 dev/efidev/efidev.c optional efirt dev/efidev/efirt.c optional efirt dev/efidev/efirtc.c optional efirt dev/e1000/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/em_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/igb_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_80003es2lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82540.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82541.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82542.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82543.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82571.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82575.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_ich8lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_i210.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_api.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_base.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mac.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_manage.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_nvm.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_phy.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_vf.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mbx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_osdep.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/et/if_et.c optional et dev/ena/ena.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" dev/ena/ena_datapath.c optional ena \ compile-with 
"${NORMAL_C} -I$S/contrib" dev/ena/ena_netmap.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" dev/ena/ena_rss.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" dev/ena/ena_sysctl.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" contrib/ena-com/ena_com.c optional ena contrib/ena-com/ena_eth_com.c optional ena dev/etherswitch/arswitch/arswitch.c optional arswitch dev/etherswitch/arswitch/arswitch_reg.c optional arswitch dev/etherswitch/arswitch/arswitch_phy.c optional arswitch dev/etherswitch/arswitch/arswitch_8216.c optional arswitch dev/etherswitch/arswitch/arswitch_8226.c optional arswitch dev/etherswitch/arswitch/arswitch_8316.c optional arswitch dev/etherswitch/arswitch/arswitch_8327.c optional arswitch dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch dev/etherswitch/etherswitch.c optional etherswitch dev/etherswitch/etherswitch_if.m optional etherswitch dev/etherswitch/ip17x/ip17x.c optional ip17x dev/etherswitch/ip17x/ip175c.c optional ip17x dev/etherswitch/ip17x/ip175d.c optional ip17x dev/etherswitch/ip17x/ip17x_phy.c optional ip17x dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x dev/etherswitch/miiproxy.c optional miiproxy dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb dev/etherswitch/e6000sw/e6000sw.c optional e6000sw fdt dev/etherswitch/e6000sw/e6060sw.c optional e6060sw dev/etherswitch/infineon/adm6996fc.c optional adm6996fc dev/etherswitch/micrel/ksz8995ma.c optional ksz8995ma dev/etherswitch/ukswitch/ukswitch.c optional ukswitch dev/evdev/cdev.c optional evdev dev/evdev/evdev.c optional evdev dev/evdev/evdev_mt.c optional evdev dev/evdev/evdev_utils.c optional evdev dev/evdev/uinput.c optional evdev uinput dev/exca/exca.c optional cbb -dev/extres/clk/clk.c optional clk -dev/extres/clk/clkdev_if.m optional clk -dev/extres/clk/clknode_if.m optional clk -dev/extres/clk/clk_bus.c optional clk fdt -dev/extres/clk/clk_div.c optional clk -dev/extres/clk/clk_fixed.c optional clk -dev/extres/clk/clk_gate.c optional 
clk -dev/extres/clk/clk_link.c optional clk -dev/extres/clk/clk_mux.c optional clk dev/extres/phy/phy.c optional phy dev/extres/phy/phydev_if.m optional phy fdt dev/extres/phy/phynode_if.m optional phy dev/extres/phy/phy_usb.c optional phy dev/extres/phy/phynode_usb_if.m optional phy dev/extres/hwreset/hwreset.c optional hwreset dev/extres/hwreset/hwreset_array.c optional hwreset dev/extres/hwreset/hwreset_if.m optional hwreset dev/extres/nvmem/nvmem.c optional nvmem fdt dev/extres/nvmem/nvmem_if.m optional nvmem dev/extres/regulator/regdev_if.m optional regulator fdt dev/extres/regulator/regnode_if.m optional regulator dev/extres/regulator/regulator.c optional regulator dev/extres/regulator/regulator_bus.c optional regulator fdt dev/extres/regulator/regulator_fixed.c optional regulator dev/extres/syscon/syscon.c optional syscon dev/extres/syscon/syscon_generic.c optional syscon fdt dev/extres/syscon/syscon_if.m optional syscon dev/extres/syscon/syscon_power.c optional syscon syscon_power dev/fb/fbd.c optional fbd | vt dev/fb/fb_if.m standard dev/fb/splash.c optional sc splash dev/fdt/fdt_clock.c optional fdt fdt_clock dev/fdt/fdt_clock_if.m optional fdt fdt_clock dev/fdt/fdt_common.c optional fdt dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl dev/fdt/fdt_slicer.c optional fdt cfi | fdt mx25l | fdt n25q | fdt at45d dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \ dependency "${FDT_DTS_FILE:T:R}.dtb" dev/fdt/simplebus.c optional fdt dev/fdt/simple_mfd.c optional syscon fdt dev/filemon/filemon.c optional filemon dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp 
dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/flash/cqspi.c optional cqspi fdt xdma dev/flash/mx25l.c optional mx25l dev/flash/n25q.c optional n25q fdt dev/flash/qspi_if.m optional cqspi fdt | n25q fdt dev/fxp/if_fxp.c optional fxp dev/fxp/inphy.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gve/gve_adminq.c optional gve dev/gve/gve_main.c optional gve dev/gve/gve_qpl.c optional gve dev/gve/gve_rx.c optional gve dev/gve/gve_sysctl.c optional gve dev/gve/gve_tx.c optional gve dev/gve/gve_utils.c optional gve dev/goldfish/goldfish_rtc.c optional goldfish_rtc fdt dev/gpio/dwgpio/dwgpio.c optional gpio dwgpio fdt dev/gpio/dwgpio/dwgpio_bus.c optional gpio dwgpio fdt dev/gpio/dwgpio/dwgpio_if.m optional gpio dwgpio fdt dev/gpio/gpiobacklight.c optional gpiobacklight fdt dev/gpio/gpiokeys.c optional gpiokeys fdt dev/gpio/gpiokeys_codes.c optional gpiokeys fdt dev/gpio/gpiobus.c optional gpio \ dependency "gpiobus_if.h" dev/gpio/gpioc.c optional gpio \ dependency "gpio_if.h" dev/gpio/gpioiic.c optional gpioiic dev/gpio/gpioled.c optional gpioled !fdt dev/gpio/gpioled_fdt.c optional gpioled fdt dev/gpio/gpiomdio.c optional gpiomdio mii_bitbang dev/gpio/gpiopower.c optional gpiopower fdt dev/gpio/gpioregulator.c optional gpioregulator fdt dev/gpio/gpiospi.c optional gpiospi dev/gpio/gpioths.c optional gpioths dev/gpio/gpio_if.m optional gpio dev/gpio/gpiobus_if.m optional gpio dev/gpio/gpiopps.c optional gpiopps fdt dev/gpio/ofw_gpiobus.c optional fdt gpio dev/hid/bcm5974.c optional bcm5974 dev/hid/hconf.c optional hconf dev/hid/hcons.c optional hcons dev/hid/hgame.c optional hgame dev/hid/hid.c optional hid dev/hid/hid_if.m optional hid dev/hid/hidbus.c optional hidbus dev/hid/hidmap.c optional hidmap dev/hid/hidquirk.c optional hid dev/hid/hidraw.c optional hidraw dev/hid/hkbd.c optional hkbd dev/hid/hms.c optional hms dev/hid/hmt.c optional hmt hconf dev/hid/hpen.c optional hpen dev/hid/hsctrl.c 
optional hsctrl dev/hid/ietp.c optional ietp dev/hid/ps4dshock.c optional ps4dshock dev/hid/xb360gp.c optional xb360gp dev/hifn/hifn7751.c optional hifn dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus dev/ichiic/ig4_iic.c optional ig4 iicbus dev/ichiic/ig4_pci.c optional ig4 pci iicbus dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_pci.c optional ida pci dev/iicbus/acpi_iicbus.c optional acpi iicbus | acpi compat_linuxkpi dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iic_recover_bus.c optional iicbus | compat_linuxkpi dev/iicbus/iicbb.c optional iicbb | compat_linuxkpi dev/iicbus/iicbb_if.m optional iicbb | compat_linuxkpi dev/iicbus/iicbus.c optional iicbus | compat_linuxkpi dev/iicbus/iicbus_if.m optional iicbus | compat_linuxkpi dev/iicbus/iichid.c optional iichid acpi hid iicbus dev/iicbus/iiconf.c optional iicbus | compat_linuxkpi dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iicbus/adc/ad7418.c optional ad7418 dev/iicbus/adc/ads111x.c optional ads111x dev/iicbus/adc/pcf8591.c optional pcf8591 dev/iicbus/controller/opencores/iicoc.c optional iicoc dev/iicbus/controller/opencores/iicoc_fdt.c optional iicoc fdt dev/iicbus/controller/opencores/iicoc_pci.c optional iicoc pci dev/iicbus/mux/iicmux.c optional iicmux dev/iicbus/mux/iicmux_if.m optional iicmux dev/iicbus/mux/iic_gpiomux.c optional iic_gpiomux fdt dev/iicbus/mux/ltc430x.c optional ltc430x dev/iicbus/mux/pca954x.c optional pca954x iicbus iicmux dev/iicbus/ofw_iicbus.c optional fdt iicbus dev/iicbus/ofw_iicbus_if.m optional fdt iicbus dev/iicbus/rtc/ds1307.c optional ds1307 dev/iicbus/rtc/ds13rtc.c optional ds13rtc | ds133x | ds1374 dev/iicbus/rtc/ds1672.c optional 
ds1672 dev/iicbus/rtc/ds3231.c optional ds3231 dev/iicbus/rtc/isl12xx.c optional isl12xx dev/iicbus/rtc/nxprtc.c optional nxprtc | pcf8563 dev/iicbus/rtc/pcf85063.c optional pcf85063 iicbus fdt dev/iicbus/rtc/rtc8583.c optional rtc8583 dev/iicbus/rtc/rv3032.c optional rv3032 iicbus fdt dev/iicbus/rtc/rx8803.c optional rx8803 iicbus fdt dev/iicbus/rtc/s35390a.c optional s35390a dev/iicbus/sensor/htu21.c optional htu21 dev/iicbus/sensor/lm75.c optional lm75 dev/iicbus/sensor/max44009.c optional max44009 dev/iicbus/gpio/pcf8574.c optional pcf8574 dev/iicbus/gpio/tca64xx.c optional tca64xx fdt gpio dev/iicbus/pmic/fan53555.c optional fan53555 fdt | tcs4525 fdt dev/iicbus/pmic/silergy/sy8106a.c optional sy8106a fdt dev/iicbus/pmic/silergy/syr827.c optional syr827 fdt dev/igc/if_igc.c optional igc iflib pci dev/igc/igc_api.c optional igc iflib pci dev/igc/igc_base.c optional igc iflib pci dev/igc/igc_i225.c optional igc iflib pci dev/igc/igc_mac.c optional igc iflib pci dev/igc/igc_nvm.c optional igc iflib pci dev/igc/igc_phy.c optional igc iflib pci dev/igc/igc_txrx.c optional igc iflib pci dev/intpm/intpm.c optional intpm pci # XXX Work around clang warning, until maintainer approves fix. 
dev/ips/ips.c optional ips \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw ipwbssfw.c optional ipwbssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ipwbssfw.c" ipw_bss.fwo optional ipwbssfw | ipwfw \ dependency "ipw_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_bss.fwo" ipw_bss.fw optional ipwbssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_bss.fw" ipwibssfw.c optional ipwibssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ipwibssfw.c" ipw_ibss.fwo optional ipwibssfw | ipwfw \ dependency "ipw_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_ibss.fwo" ipw_ibss.fw optional ipwibssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_ibss.fw" ipwmonitorfw.c optional ipwmonitorfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ipwmonitorfw.c" ipw_monitor.fwo optional ipwmonitorfw | ipwfw \ dependency "ipw_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_monitor.fwo" ipw_monitor.fw optional ipwmonitorfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_monitor.fw" dev/iscsi/icl.c optional iscsi dev/iscsi/icl_conn_if.m optional cfiscsi | iscsi dev/iscsi/icl_soft.c optional iscsi 
dev/iscsi/icl_soft_proxy.c optional iscsi dev/iscsi/iscsi.c optional iscsi scbus dev/ismt/ismt.c optional ismt dev/isl/isl.c optional isl iicbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi iwibssfw.c optional iwibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwibssfw.c" iwi_bss.fwo optional iwibssfw | iwifw \ dependency "iwi_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_bss.fwo" iwi_bss.fw optional iwibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_bss.fw" iwiibssfw.c optional iwiibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwiibssfw.c" iwi_ibss.fwo optional iwiibssfw | iwifw \ dependency "iwi_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_ibss.fwo" iwi_ibss.fw optional iwiibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_ibss.fw" iwimonitorfw.c optional iwimonitorfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwimonitorfw.c" iwi_monitor.fwo optional iwimonitorfw | iwifw \ dependency "iwi_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_monitor.fwo" iwi_monitor.fw optional iwimonitorfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean 
"iwi_monitor.fw" dev/iwm/if_iwm.c optional iwm dev/iwm/if_iwm_7000.c optional iwm dev/iwm/if_iwm_8000.c optional iwm dev/iwm/if_iwm_9000.c optional iwm dev/iwm/if_iwm_9260.c optional iwm dev/iwm/if_iwm_binding.c optional iwm dev/iwm/if_iwm_fw.c optional iwm dev/iwm/if_iwm_led.c optional iwm dev/iwm/if_iwm_mac_ctxt.c optional iwm dev/iwm/if_iwm_notif_wait.c optional iwm dev/iwm/if_iwm_pcie_trans.c optional iwm dev/iwm/if_iwm_phy_ctxt.c optional iwm dev/iwm/if_iwm_phy_db.c optional iwm dev/iwm/if_iwm_power.c optional iwm dev/iwm/if_iwm_scan.c optional iwm dev/iwm/if_iwm_sf.c optional iwm dev/iwm/if_iwm_sta.c optional iwm dev/iwm/if_iwm_time_event.c optional iwm dev/iwm/if_iwm_util.c optional iwm iwm3160fw.c optional iwm3160fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm3160fw.c" iwm3160fw.fwo optional iwm3160fw | iwmfw \ dependency "iwm3160.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3160fw.fwo" iwm3160.fw optional iwm3160fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3160-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3160.fw" iwm3168fw.c optional iwm3168fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3168.fw:iwm3168fw -miwm3168fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm3168fw.c" iwm3168fw.fwo optional iwm3168fw | iwmfw \ dependency "iwm3168.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3168fw.fwo" iwm3168.fw optional iwm3168fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3168-22.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3168.fw" iwm7260fw.c optional iwm7260fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm7260fw.c" iwm7260fw.fwo optional iwm7260fw | iwmfw \ dependency 
"iwm7260.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7260fw.fwo" iwm7260.fw optional iwm7260fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7260-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7260.fw" iwm7265fw.c optional iwm7265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm7265fw.c" iwm7265fw.fwo optional iwm7265fw | iwmfw \ dependency "iwm7265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265fw.fwo" iwm7265.fw optional iwm7265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265.fw" iwm7265Dfw.c optional iwm7265Dfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265D.fw:iwm7265Dfw -miwm7265Dfw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm7265Dfw.c" iwm7265Dfw.fwo optional iwm7265Dfw | iwmfw \ dependency "iwm7265D.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265Dfw.fwo" iwm7265D.fw optional iwm7265Dfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265D-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265D.fw" iwm8000Cfw.c optional iwm8000Cfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm8000Cfw.c" iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \ dependency "iwm8000C.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8000Cfw.fwo" iwm8000C.fw optional iwm8000Cfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8000C.fw" iwm8265.fw optional iwm8265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8265-22.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8265.fw" 
iwm8265fw.c optional iwm8265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8265.fw:iwm8265fw -miwm8265fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwm8265fw.c" iwm8265fw.fwo optional iwm8265fw | iwmfw \ dependency "iwm8265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8265fw.fwo" dev/iwn/if_iwn.c optional iwn iwn1000fw.c optional iwn1000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn1000fw.c" iwn1000fw.fwo optional iwn1000fw | iwnfw \ dependency "iwn1000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn1000fw.fwo" iwn1000.fw optional iwn1000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn1000.fw" iwn100fw.c optional iwn100fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn100fw.c" iwn100fw.fwo optional iwn100fw | iwnfw \ dependency "iwn100.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn100fw.fwo" iwn100.fw optional iwn100fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn100.fw" iwn105fw.c optional iwn105fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn105fw.c" iwn105fw.fwo optional iwn105fw | iwnfw \ dependency "iwn105.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn105fw.fwo" iwn105.fw optional iwn105fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn105.fw" iwn135fw.c optional iwn135fw | iwnfw \ compile-with "${AWK} -f 
$S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn135fw.c" iwn135fw.fwo optional iwn135fw | iwnfw \ dependency "iwn135.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn135fw.fwo" iwn135.fw optional iwn135fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn135.fw" iwn2000fw.c optional iwn2000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn2000fw.c" iwn2000fw.fwo optional iwn2000fw | iwnfw \ dependency "iwn2000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2000fw.fwo" iwn2000.fw optional iwn2000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2000.fw" iwn2030fw.c optional iwn2030fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw -miwn2030fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn2030fw.c" iwn2030fw.fwo optional iwn2030fw | iwnfw \ dependency "iwn2030.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2030fw.fwo" iwn2030.fw optional iwn2030fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2030.fw" iwn4965fw.c optional iwn4965fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn4965fw.c" iwn4965fw.fwo optional iwn4965fw | iwnfw \ dependency "iwn4965.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn4965fw.fwo" iwn4965.fw optional iwn4965fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj 
no-implicit-rule \ clean "iwn4965.fw" iwn5000fw.c optional iwn5000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn5000fw.c" iwn5000fw.fwo optional iwn5000fw | iwnfw \ dependency "iwn5000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5000fw.fwo" iwn5000.fw optional iwn5000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5000.fw" iwn5150fw.c optional iwn5150fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn5150fw.c" iwn5150fw.fwo optional iwn5150fw | iwnfw \ dependency "iwn5150.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5150fw.fwo" iwn5150.fw optional iwn5150fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5150.fw" iwn6000fw.c optional iwn6000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn6000fw.c" iwn6000fw.fwo optional iwn6000fw | iwnfw \ dependency "iwn6000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000fw.fwo" iwn6000.fw optional iwn6000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000.fw" iwn6000g2afw.c optional iwn6000g2afw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn6000g2afw.c" iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \ dependency "iwn6000g2a.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2afw.fwo" 
iwn6000g2a.fw optional iwn6000g2afw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2a.fw" iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn6000g2bfw.c" iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \ dependency "iwn6000g2b.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2bfw.fwo" iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2b.fw" iwn6050fw.c optional iwn6050fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "iwn6050fw.c" iwn6050fw.fwo optional iwn6050fw | iwnfw \ dependency "iwn6050.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6050fw.fwo" iwn6050.fw optional iwn6050fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6050.fw" dev/ixgbe/if_ix.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_ixv.c optional ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_bypass.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_fdir.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_sriov.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" 
dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/jedec_dimm/jedec_dimm.c optional jedec_dimm smbus dev/jme/if_jme.c optional jme pci dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt | hkbd dev/kbdmux/kbdmux.c optional kbdmux dev/ksyms/ksyms.c optional ksyms dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/liquidio/base/cn23xx_pf_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_console.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_ctrl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_droq.c optional lio \ compile-with 
"${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_mem_ops.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_request_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_response_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_core.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_ioctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_main.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rss.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rxtx.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_sysctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" lio.c optional lio \ compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "lio.c" lio_23xx_nic.bin.fw.fwo optional lio \ dependency "lio_23xx_nic.bin.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "lio_23xx_nic.bin.fw.fwo" lio_23xx_nic.bin.fw optional lio \ dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "lio_23xx_nic.bin.fw" dev/malo/if_malo.c optional malo dev/malo/if_malohal.c optional malo dev/malo/if_malo_pci.c optional malo pci dev/md/md.c optional md dev/mdio/mdio_if.m optional miiproxy | mdio dev/mdio/mdio.c optional miiproxy | mdio dev/mem/memdev.c optional mem dev/mem/memutil.c optional mem dev/mfi/mfi.c optional mfi 
dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_syspd.c optional mfi dev/mfi/mfi_tbolt.c optional mfi dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/atphy.c optional miibus | atphy dev/mii/axphy.c optional miibus | axphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/dp83822phy.c optional miibus | dp83822phy dev/mii/dp83867phy.c optional miibus | dp83867phy dev/mii/e1000phy.c optional miibus | e1000phy dev/mii/gentbi.c optional miibus | gentbi dev/mii/icsphy.c optional miibus | icsphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/jmphy.c optional miibus | jmphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/mcommphy.c optional miibus | mcommphy dev/mii/micphy.c optional miibus fdt | micphy fdt dev/mii/mii.c optional miibus | mii dev/mii/mii_bitbang.c optional miibus | mii_bitbang dev/mii/mii_physubr.c optional miibus | mii dev/mii/mii_fdt.c optional miibus fdt | mii fdt dev/mii/miibus_if.m optional miibus | mii dev/mii/mv88e151x.c optional miibus | mv88e151x dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/nsphyter.c optional miibus | nsphyter dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rdcphy.c optional miibus | rdcphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch dev/mii/smcphy.c optional miibus | smcphy dev/mii/smscphy.c optional miibus | smscphy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/truephy.c optional miibus | truephy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/vscphy.c optional miibus | vscphy dev/mii/xmphy.c optional miibus | xmphy dev/mlxfw/mlxfw_fsm.c optional mlxfw \ compile-with "${MLXFW_C}" 
dev/mlxfw/mlxfw_mfa2.c optional mlxfw \ compile-with "${MLXFW_C}" dev/mlxfw/mlxfw_mfa2_tlv_multi.c optional mlxfw \ compile-with "${MLXFW_C}" dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mmc/mmc_subr.c optional mmc | mmcsd !mmccam dev/mmc/mmc.c optional mmc !mmccam dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd !mmccam dev/mmc/mmc_fdt_helpers.c optional mmc regulator clk fdt | mmccam regulator clk fdt dev/mmc/mmc_helpers.c optional mmc gpio regulator clk | mmccam gpio regulator clk dev/mmc/mmc_pwrseq.c optional mmc clk regulator fdt | mmccam clk regulator fdt dev/mmc/mmc_pwrseq_if.m optional mmc clk regulator fdt | mmccam clk regulator fdt dev/mmcnull/mmcnull.c optional mmcnull dev/mpr/mpr.c optional mpr dev/mpr/mpr_config.c optional mpr # XXX Work around clang warning, until maintainer approves fix. dev/mpr/mpr_mapping.c optional mpr \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mpr/mpr_pci.c optional mpr pci dev/mpr/mpr_sas.c optional mpr \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mpr/mpr_sas_lsi.c optional mpr dev/mpr/mpr_table.c optional mpr dev/mpr/mpr_user.c optional mpr dev/mps/mps.c optional mps dev/mps/mps_config.c optional mps # XXX Work around clang warning, until maintainer approves fix. 
dev/mps/mps_mapping.c optional mps \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mps/mps_pci.c optional mps pci dev/mps/mps_sas.c optional mps \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mps/mps_sas_lsi.c optional mps dev/mps/mps_table.c optional mps dev/mps/mps_user.c optional mps dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/mpt/mpt_user.c optional mpt dev/mrsas/mrsas.c optional mrsas dev/mrsas/mrsas_cam.c optional mrsas dev/mrsas/mrsas_ioctl.c optional mrsas dev/mrsas/mrsas_fp.c optional mrsas dev/msk/if_msk.c optional msk dev/mvs/mvs.c optional mvs dev/mvs/mvs_if.m optional mvs dev/mvs/mvs_pci.c optional mvs pci dev/mwl/if_mwl.c optional mwl dev/mwl/if_mwl_pci.c optional mwl pci dev/mwl/mwlhal.c optional mwl mwlfw.c optional mwlfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "mwlfw.c" mw88W8363.fwo optional mwlfw \ dependency "mw88W8363.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mw88W8363.fwo" mw88W8363.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mw88W8363.fw" mwlboot.fwo optional mwlfw \ dependency "mwlboot.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mwlboot.fwo" mwlboot.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mwlboot.fw" dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/mxge/mxge_rss_eth_z8e.c optional mxge pci dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/netmap/if_ptnet.c optional netmap inet dev/netmap/netmap.c optional netmap dev/netmap/netmap_bdg.c optional 
netmap dev/netmap/netmap_freebsd.c optional netmap dev/netmap/netmap_generic.c optional netmap dev/netmap/netmap_kloop.c optional netmap dev/netmap/netmap_legacy.c optional netmap dev/netmap/netmap_mbq.c optional netmap dev/netmap/netmap_mem2.c optional netmap dev/netmap/netmap_monitor.c optional netmap dev/netmap/netmap_null.c optional netmap dev/netmap/netmap_offloadings.c optional netmap dev/netmap/netmap_pipe.c optional netmap dev/netmap/netmap_vale.c optional netmap # compile-with "${NORMAL_C} -Wconversion -Wextra" dev/nfsmb/nfsmb.c optional nfsmb pci dev/nge/if_nge.c optional nge dev/nmdm/nmdm.c optional nmdm dev/null/null.c standard dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ahci.c optional nvme ahci dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_pci.c optional nvme pci dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sim.c optional nvme scbus dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/oce/oce_hw.c optional oce pci dev/oce/oce_if.c optional oce pci dev/oce/oce_mbox.c optional oce pci dev/oce/oce_queue.c optional oce pci dev/oce/oce_sysctl.c optional oce pci dev/oce/oce_util.c optional oce pci dev/ocs_fc/ocs_gendump.c optional ocs_fc pci dev/ocs_fc/ocs_pci.c optional ocs_fc pci dev/ocs_fc/ocs_ioctl.c optional ocs_fc pci dev/ocs_fc/ocs_os.c optional ocs_fc pci dev/ocs_fc/ocs_utils.c optional ocs_fc pci dev/ocs_fc/ocs_hw.c optional ocs_fc pci dev/ocs_fc/ocs_hw_queues.c optional ocs_fc pci dev/ocs_fc/sli4.c optional ocs_fc pci dev/ocs_fc/ocs_sm.c optional ocs_fc pci dev/ocs_fc/ocs_device.c optional ocs_fc pci dev/ocs_fc/ocs_xport.c optional ocs_fc pci dev/ocs_fc/ocs_domain.c optional ocs_fc pci dev/ocs_fc/ocs_sport.c optional ocs_fc pci dev/ocs_fc/ocs_els.c optional ocs_fc pci dev/ocs_fc/ocs_fabric.c optional ocs_fc pci dev/ocs_fc/ocs_io.c optional 
ocs_fc pci dev/ocs_fc/ocs_node.c optional ocs_fc pci dev/ocs_fc/ocs_scsi.c optional ocs_fc pci dev/ocs_fc/ocs_unsol.c optional ocs_fc pci dev/ocs_fc/ocs_ddump.c optional ocs_fc pci dev/ocs_fc/ocs_mgmt.c optional ocs_fc pci dev/ocs_fc/ocs_cam.c optional ocs_fc pci dev/ofw/ofw_bus_if.m optional fdt dev/ofw/ofw_bus_subr.c optional fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_fdt.c optional fdt dev/ofw/ofw_firmware.c optional fdt dev/ofw/ofw_if.m optional fdt dev/ofw/ofw_graph.c optional fdt dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" dev/ow/owll_if.m optional ow dev/ow/own_if.m optional ow dev/ow/ow_temp.c optional ow_temp dev/ow/owc_gpiobus.c optional owc gpio dev/pbio/pbio.c optional pbio isa dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_iov.c optional pci pci_iov dev/pci/pci_iov_if.m standard dev/pci/pci_iov_schema.c optional pci pci_iov dev/pci/pci_pci.c optional pci dev/pci/pci_subr.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/pcib_support.c standard dev/pci/vga_pci.c optional pci dev/pms/freebsd/driver/ini/src/agtiapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sahw.c 
optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable 
-Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" 
dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/ppbus/if_plip.c optional plip dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus 
dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/proto/proto_bus_isa.c optional proto acpi | proto isa dev/proto/proto_bus_pci.c optional proto pci dev/proto/proto_busdma.c optional proto dev/proto/proto_core.c optional proto dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/pty/pty.c optional pty dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pci.c optional puc pci dev/pwm/pwmc.c optional pwm | pwmc dev/pwm/pwmbus.c optional pwm | pwmbus dev/pwm/pwmbus_if.m optional pwm | pwmbus dev/pwm/ofw_pwm.c optional pwm fdt | pwmbus fdt dev/pwm/ofw_pwmbus.c optional pwm fdt | pwmbus fdt dev/pwm/pwm_backlight.c optional pwm pwm_backlight fdt backlight dev/quicc/quicc_core.c optional quicc dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/rt2860.c optional ral dev/ral/if_ral_pci.c optional ral pci rt2561fw.c optional rt2561fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rt2561fw.c" rt2561fw.fwo optional rt2561fw | ralfw \ dependency "rt2561.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561fw.fwo" rt2561.fw optional rt2561fw | ralfw \ dependency "$S/contrib/dev/ral/rt2561.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561.fw" rt2561sfw.c optional rt2561sfw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rt2561sfw.c" rt2561sfw.fwo optional rt2561sfw | ralfw \ dependency "rt2561s.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ 
clean "rt2561sfw.fwo" rt2561s.fw optional rt2561sfw | ralfw \ dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561s.fw" rt2661fw.c optional rt2661fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rt2661fw.c" rt2661fw.fwo optional rt2661fw | ralfw \ dependency "rt2661.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2661fw.fwo" rt2661.fw optional rt2661fw | ralfw \ dependency "$S/contrib/dev/ral/rt2661.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2661.fw" rt2860fw.c optional rt2860fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rt2860fw.c" rt2860fw.fwo optional rt2860fw | ralfw \ dependency "rt2860.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2860fw.fwo" rt2860.fw optional rt2860fw | ralfw \ dependency "$S/contrib/dev/ral/rt2860.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2860.fw" dev/random/random_infra.c standard dev/random/random_harvestq.c standard dev/random/randomdev.c optional !random_loadable dev/random/fenestrasX/fx_brng.c optional !random_loadable random_fenestrasx dev/random/fenestrasX/fx_main.c optional !random_loadable random_fenestrasx \ compile-with "${NORMAL_C} -I$S/crypto/blake2" dev/random/fenestrasX/fx_pool.c optional !random_loadable random_fenestrasx \ compile-with "${NORMAL_C} -I$S/crypto/blake2" dev/random/fenestrasX/fx_rng.c optional !random_loadable random_fenestrasx \ compile-with "${NORMAL_C} -I$S/crypto/blake2" dev/random/fortuna.c optional !random_loadable !random_fenestrasx dev/random/hash.c optional !random_loadable dev/rccgpio/rccgpio.c optional rccgpio gpio dev/re/if_re.c optional re dev/rl/if_rl.c optional rl pci dev/rndtest/rndtest.c optional rndtest # 
dev/rtsx/rtsx.c optional rtsx pci # dev/rtwn/if_rtwn.c optional rtwn dev/rtwn/if_rtwn_beacon.c optional rtwn dev/rtwn/if_rtwn_calib.c optional rtwn dev/rtwn/if_rtwn_cam.c optional rtwn dev/rtwn/if_rtwn_efuse.c optional rtwn dev/rtwn/if_rtwn_fw.c optional rtwn dev/rtwn/if_rtwn_rx.c optional rtwn dev/rtwn/if_rtwn_task.c optional rtwn dev/rtwn/if_rtwn_tx.c optional rtwn # dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_tx.c optional rtwn_pci pci # dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb # RTL8188E dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn dev/rtwn/rtl8188e/r88e_calib.c optional rtwn dev/rtwn/rtl8188e/r88e_chan.c optional rtwn dev/rtwn/rtl8188e/r88e_fw.c optional rtwn dev/rtwn/rtl8188e/r88e_init.c optional rtwn dev/rtwn/rtl8188e/r88e_led.c optional rtwn dev/rtwn/rtl8188e/r88e_tx.c optional rtwn dev/rtwn/rtl8188e/r88e_rf.c optional rtwn dev/rtwn/rtl8188e/r88e_rom.c optional rtwn dev/rtwn/rtl8188e/r88e_rx.c optional rtwn dev/rtwn/rtl8188e/pci/r88ee_attach.c optional rtwn_pci pci dev/rtwn/rtl8188e/pci/r88ee_init.c optional rtwn_pci pci dev/rtwn/rtl8188e/pci/r88ee_rx.c optional rtwn_pci pci dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb # RTL8192C dev/rtwn/rtl8192c/r92c_attach.c optional rtwn dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn dev/rtwn/rtl8192c/r92c_calib.c optional rtwn dev/rtwn/rtl8192c/r92c_chan.c optional rtwn dev/rtwn/rtl8192c/r92c_fw.c optional rtwn dev/rtwn/rtl8192c/r92c_init.c optional rtwn dev/rtwn/rtl8192c/r92c_llt.c optional rtwn dev/rtwn/rtl8192c/r92c_rf.c optional rtwn dev/rtwn/rtl8192c/r92c_rom.c optional rtwn dev/rtwn/rtl8192c/r92c_rx.c optional rtwn 
dev/rtwn/rtl8192c/r92c_tx.c optional rtwn dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb # RTL8192E dev/rtwn/rtl8192e/r92e_chan.c optional rtwn dev/rtwn/rtl8192e/r92e_fw.c optional rtwn dev/rtwn/rtl8192e/r92e_init.c optional rtwn dev/rtwn/rtl8192e/r92e_led.c optional rtwn dev/rtwn/rtl8192e/r92e_rf.c optional rtwn dev/rtwn/rtl8192e/r92e_rom.c optional rtwn dev/rtwn/rtl8192e/r92e_rx.c optional rtwn dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb # RTL8812A dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn dev/rtwn/rtl8812a/r12a_calib.c optional rtwn dev/rtwn/rtl8812a/r12a_caps.c optional rtwn dev/rtwn/rtl8812a/r12a_chan.c optional rtwn dev/rtwn/rtl8812a/r12a_fw.c optional rtwn dev/rtwn/rtl8812a/r12a_init.c optional rtwn dev/rtwn/rtl8812a/r12a_led.c optional rtwn dev/rtwn/rtl8812a/r12a_rf.c optional rtwn dev/rtwn/rtl8812a/r12a_rom.c optional rtwn dev/rtwn/rtl8812a/r12a_rx.c optional rtwn dev/rtwn/rtl8812a/r12a_tx.c optional rtwn dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb # RTL8821A dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn dev/rtwn/rtl8821a/r21a_calib.c optional rtwn dev/rtwn/rtl8821a/r21a_chan.c optional rtwn dev/rtwn/rtl8821a/r21a_fw.c optional 
rtwn dev/rtwn/rtl8821a/r21a_init.c optional rtwn dev/rtwn/rtl8821a/r21a_led.c optional rtwn dev/rtwn/rtl8821a/r21a_rom.c optional rtwn dev/rtwn/rtl8821a/r21a_rx.c optional rtwn dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb rtwn-rtl8188eefw.c optional rtwn-rtl8188eefw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eefw.fw:rtwn-rtl8188eefw:111 -mrtwn-rtl8188eefw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8188eefw.c" rtwn-rtl8188eefw.fwo optional rtwn-rtl8188eefw | rtwnfw \ dependency "rtwn-rtl8188eefw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8188eefw.fwo" rtwn-rtl8188eefw.fw optional rtwn-rtl8188eefw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eefw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8188eefw.fw" rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8188eufw.c" rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \ dependency "rtwn-rtl8188eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8188eufw.fwo" rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8188eufw.fw" rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE.c" rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \ dependency "rtwn-rtl8192cfwE.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean 
"rtwn-rtl8192cfwE.fwo" rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE.fw" rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE_B.c" rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "rtwn-rtl8192cfwE_B.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fwo" rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fw" rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwT.c" rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \ dependency "rtwn-rtl8192cfwT.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwT.fwo" rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwT.fw" rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwU.c" rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \ dependency "rtwn-rtl8192cfwU.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwU.fwo" rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \ 
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwU.fw" rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8192eufw.c" rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | rtwnfw \ dependency "rtwn-rtl8192eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192eufw.fwo" rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192eufw.fw" rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8812aufw.c" rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \ dependency "rtwn-rtl8812aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8812aufw.fwo" rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8812aufw.fw" rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rtwn-rtl8821aufw.c" rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \ dependency "rtwn-rtl8821aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8821aufw.fwo" rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ 
clean "rtwn-rtl8821aufw.fw" dev/safe/safe.c optional safe dev/scc/scc_if.m optional scc dev/scc/scc_bfe_quicc.c optional scc quicc dev/scc/scc_core.c optional scc dev/scc/scc_dev_quicc.c optional scc quicc dev/scc/scc_dev_z8530.c optional scc dev/sdhci/sdhci.c optional sdhci dev/sdhci/sdhci_fdt.c optional sdhci fdt regulator clk dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio dev/sdhci/sdhci_fsl_fdt.c optional sdhci fdt gpio regulator clk dev/sdhci/sdhci_if.m optional sdhci dev/sdhci/sdhci_acpi.c optional sdhci acpi dev/sdhci/sdhci_pci.c optional sdhci pci dev/sdio/sdio_if.m optional mmccam dev/sdio/sdio_subr.c optional mmccam dev/sdio/sdiob.c optional mmccam dev/sff/sff_if.m optional sff dev/sff/sfp_fdt.c optional sff fdt dev/sge/if_sge.c optional sge pci dev/siis/siis.c optional siis pci dev/sis/if_sis.c optional sis pci dev/sk/if_sk.c optional sk pci dev/smbios/smbios.c optional smbios dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/smc/if_smc.c optional smc dev/smc/if_smc_acpi.c optional smc acpi dev/smc/if_smc_fdt.c optional smc fdt dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci dev/sound/pci/emu10kx.c optional snd_emu10kx pci dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci dev/sound/pci/envy24.c optional snd_envy24 pci dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro3.c optional snd_maestro3 pci 
dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdaa.c optional snd_hda pci dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pci/hda/hdac_if.m optional snd_hda pci dev/sound/pci/hda/hdacc.c optional snd_hda pci dev/sound/pci/hdspe.c optional snd_hdspe pci dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_chain.c optional sound dev/sound/pcm/feeder_eq.c optional sound \ dependency "feeder_eq_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_format.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_matrix.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_mixer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_rate.c optional sound \ dependency "feeder_rate_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_volume.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m 
optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/acpi_spibus.c optional acpi spibus dev/spibus/ofw_spibus.c optional fdt spibus dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spigen.c optional spigen dev/spibus/spibus_if.m optional spibus dev/ste/if_ste.c optional ste pci dev/stge/if_stge.c optional stge dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scterm-dumb.c optional sc !SC_NO_TERM_DUMB dev/syscons/scterm-sc.c optional sc !SC_NO_TERM_SC dev/syscons/scterm-teken.c optional sc !SC_NO_TERM_TEKEN dev/syscons/scvidctl.c optional sc dev/syscons/scvtb.c optional sc dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tcp_log/tcp_log_dev.c optional tcp_blackbox inet | tcp_blackbox inet6 dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/tws/tws.c optional tws dev/tws/tws_cam.c optional tws dev/tws/tws_hdm.c optional tws dev/tws/tws_services.c optional tws dev/tws/tws_user.c optional tws dev/uart/uart_bus_acpi.c optional uart acpi dev/uart/uart_bus_fdt.c optional uart fdt dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pci.c optional uart pci 
dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_cpu_acpi.c optional uart acpi dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_imx.c optional uart uart_imx fdt dev/uart/uart_dev_msm.c optional uart uart_msm fdt dev/uart/uart_dev_mvebu.c optional uart uart_mvebu fdt dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps dev/uart/uart_dev_pl011.c optional uart pl011 dev/uart/uart_dev_quicc.c optional uart quicc dev/uart/uart_dev_snps.c optional uart uart_snps fdt dev/uart/uart_dev_z8530.c optional uart uart_z8530 | uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart # # USB controller drivers # dev/usb/controller/musb_otg.c optional musb dev/usb/controller/dwc_otg.c optional dwcotg dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt dev/usb/controller/dwc_otg_acpi.c optional dwcotg acpi dev/usb/controller/ehci.c optional ehci dev/usb/controller/ehci_msm.c optional ehci_msm fdt dev/usb/controller/ehci_pci.c optional ehci pci dev/usb/controller/ohci.c optional ohci dev/usb/controller/ohci_pci.c optional ohci pci dev/usb/controller/uhci.c optional uhci dev/usb/controller/uhci_pci.c optional uhci pci dev/usb/controller/xhci.c optional xhci dev/usb/controller/xhci_pci.c optional xhci pci dev/usb/controller/saf1761_otg.c optional saf1761otg dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt dev/usb/controller/uss820dci.c optional uss820dci dev/usb/controller/usb_controller.c optional usb # # USB storage drivers # dev/usb/storage/cfumass.c optional cfumass ctl dev/usb/storage/umass.c optional umass dev/usb/storage/urio.c optional urio dev/usb/storage/ustorage_fs.c optional usfs # # USB core # dev/usb/usb_busdma.c optional usb dev/usb/usb_core.c optional usb dev/usb/usb_debug.c optional usb dev/usb/usb_dev.c optional usb dev/usb/usb_device.c optional usb dev/usb/usb_dynamic.c optional usb 
dev/usb/usb_error.c optional usb dev/usb/usb_fdt_support.c optional usb fdt dev/usb/usb_generic.c optional usb dev/usb/usb_handle_request.c optional usb dev/usb/usb_hid.c optional usb dev/usb/usb_hub.c optional usb dev/usb/usb_hub_acpi.c optional uacpi acpi dev/usb/usb_if.m optional usb dev/usb/usb_lookup.c optional usb dev/usb/usb_mbuf.c optional usb dev/usb/usb_msctest.c optional usb dev/usb/usb_parse.c optional usb dev/usb/usb_pf.c optional usb dev/usb/usb_process.c optional usb dev/usb/usb_request.c optional usb dev/usb/usb_transfer.c optional usb dev/usb/usb_util.c optional usb # # USB network drivers # dev/usb/net/if_aue.c optional aue dev/usb/net/if_axe.c optional axe dev/usb/net/if_axge.c optional axge dev/usb/net/if_cdce.c optional cdce dev/usb/net/if_cdceem.c optional cdceem dev/usb/net/if_cue.c optional cue dev/usb/net/if_ipheth.c optional ipheth dev/usb/net/if_kue.c optional kue dev/usb/net/if_mos.c optional mos dev/usb/net/if_muge.c optional muge dev/usb/net/if_rue.c optional rue dev/usb/net/if_smsc.c optional smsc dev/usb/net/if_udav.c optional udav dev/usb/net/if_ure.c optional ure dev/usb/net/if_usie.c optional usie dev/usb/net/if_urndis.c optional urndis dev/usb/net/ruephy.c optional rue dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \ cdceem | cue | ipheth | kue | mos | \ rue | smsc | udav | ure | urndis | muge dev/usb/net/uhso.c optional uhso # # USB WLAN drivers # dev/usb/wlan/if_rsu.c optional rsu rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "rsu-rtl8712fw.c" rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \ dependency "rsu-rtl8712fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rsu-rtl8712fw.fwo" rsu-rtl8712fw.fw optional rsu-rtl8712fw | rsufw \ dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \ compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \ clean "rsu-rtl8712fw.fw" dev/usb/wlan/if_rum.c optional rum dev/usb/wlan/if_run.c optional run runfw.c optional runfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "runfw.c" runfw.fwo optional runfw \ dependency "run.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "runfw.fwo" run.fw optional runfw \ dependency "$S/contrib/dev/run/rt2870.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "run.fw" dev/usb/wlan/if_uath.c optional uath dev/usb/wlan/if_upgt.c optional upgt dev/usb/wlan/if_ural.c optional ural dev/usb/wlan/if_urtw.c optional urtw dev/usb/wlan/if_zyd.c optional zyd # # USB serial and parallel port drivers # dev/usb/serial/u3g.c optional u3g dev/usb/serial/uark.c optional uark dev/usb/serial/ubsa.c optional ubsa dev/usb/serial/ubser.c optional ubser dev/usb/serial/uchcom.c optional uchcom dev/usb/serial/ucycom.c optional ucycom dev/usb/serial/ufoma.c optional ufoma dev/usb/serial/uftdi.c optional uftdi dev/usb/serial/ugensa.c optional ugensa dev/usb/serial/uipaq.c optional uipaq dev/usb/serial/ulpt.c optional ulpt dev/usb/serial/umcs.c optional umcs dev/usb/serial/umct.c optional umct dev/usb/serial/umodem.c optional umodem dev/usb/serial/umoscom.c optional umoscom dev/usb/serial/uplcom.c optional uplcom dev/usb/serial/uslcom.c optional uslcom dev/usb/serial/uvisor.c optional uvisor dev/usb/serial/uvscom.c optional uvscom dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \ uchcom | ucycom | ufoma | uftdi | \ ugensa | uipaq | umcs | umct | \ umodem | umoscom | uplcom | usie | \ uslcom | uvisor | uvscom # # USB misc drivers # dev/usb/misc/cp2112.c optional cp2112 dev/usb/misc/udbp.c optional udbp dev/usb/misc/ugold.c optional ugold dev/usb/misc/uled.c optional uled # # USB input drivers # dev/usb/input/atp.c optional atp dev/usb/input/uep.c optional uep dev/usb/input/uhid.c optional 
uhid dev/usb/input/uhid_snes.c optional uhid_snes dev/usb/input/ukbd.c optional ukbd dev/usb/input/ums.c optional ums dev/usb/input/usbhid.c optional usbhid dev/usb/input/wmt.c optional wmt dev/usb/input/wsp.c optional wsp # # USB quirks # dev/usb/quirk/usb_quirk.c optional usb # # USB templates # dev/usb/template/usb_template.c optional usb_template dev/usb/template/usb_template_audio.c optional usb_template dev/usb/template/usb_template_cdce.c optional usb_template dev/usb/template/usb_template_kbd.c optional usb_template dev/usb/template/usb_template_modem.c optional usb_template dev/usb/template/usb_template_mouse.c optional usb_template dev/usb/template/usb_template_msc.c optional usb_template dev/usb/template/usb_template_mtp.c optional usb_template dev/usb/template/usb_template_phone.c optional usb_template dev/usb/template/usb_template_serialnet.c optional usb_template dev/usb/template/usb_template_midi.c optional usb_template dev/usb/template/usb_template_multi.c optional usb_template dev/usb/template/usb_template_cdceem.c optional usb_template # # USB video drivers # dev/usb/video/udl.c optional udl # # USB END # dev/videomode/videomode.c optional videomode dev/videomode/edid.c optional videomode dev/videomode/pickmode.c optional videomode dev/videomode/vesagtf.c optional videomode dev/veriexec/verified_exec.c optional mac_veriexec dev/vge/if_vge.c optional vge dev/viapm/viapm.c optional viapm pci dev/virtio/virtio.c optional virtio dev/virtio/virtqueue.c optional virtio dev/virtio/virtio_bus_if.m optional virtio dev/virtio/virtio_if.m optional virtio dev/virtio/pci/virtio_pci.c optional virtio_pci dev/virtio/pci/virtio_pci_if.m optional virtio_pci dev/virtio/pci/virtio_pci_legacy.c optional virtio_pci dev/virtio/pci/virtio_pci_modern.c optional virtio_pci dev/virtio/mmio/virtio_mmio.c optional virtio_mmio dev/virtio/mmio/virtio_mmio_acpi.c optional virtio_mmio acpi dev/virtio/mmio/virtio_mmio_cmdline.c optional virtio_mmio 
dev/virtio/mmio/virtio_mmio_fdt.c optional virtio_mmio fdt dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio dev/virtio/network/if_vtnet.c optional vtnet dev/virtio/block/virtio_blk.c optional virtio_blk dev/virtio/balloon/virtio_balloon.c optional virtio_balloon dev/virtio/gpu/virtio_gpu.c optional virtio_gpu dev/virtio/scsi/virtio_scsi.c optional virtio_scsi dev/virtio/random/virtio_random.c optional virtio_random dev/virtio/console/virtio_console.c optional virtio_console dev/vkbd/vkbd.c optional vkbd dev/vmgenc/vmgenc_acpi.c optional acpi dev/vmware/vmxnet3/if_vmx.c optional vmx dev/vmware/vmci/vmci.c optional vmci dev/vmware/vmci/vmci_datagram.c optional vmci dev/vmware/vmci/vmci_doorbell.c optional vmci dev/vmware/vmci/vmci_driver.c optional vmci dev/vmware/vmci/vmci_event.c optional vmci dev/vmware/vmci/vmci_hashtable.c optional vmci dev/vmware/vmci/vmci_kernel_if.c optional vmci dev/vmware/vmci/vmci_qpair.c optional vmci dev/vmware/vmci/vmci_queue_pair.c optional vmci dev/vmware/vmci/vmci_resource.c optional vmci dev/vmware/pvscsi/pvscsi.c optional pvscsi dev/vr/if_vr.c optional vr pci dev/vt/colors/vt_termcolors.c optional vt dev/vt/font/vt_font_default.c optional vt dev/vt/font/vt_mouse_cursor.c optional vt dev/vt/hw/efifb/efifb.c optional vt_efifb dev/vt/hw/simplefb/simplefb.c optional vt_simplefb fdt dev/vt/hw/vbefb/vbefb.c optional vt_vbefb dev/vt/hw/fb/vt_fb.c optional vt dev/vt/hw/vga/vt_vga.c optional vt vt_vga dev/vt/logo/logo_freebsd.c optional vt splash dev/vt/logo/logo_beastie.c optional vt splash dev/vt/vt_buf.c optional vt dev/vt/vt_consolectl.c optional vt dev/vt/vt_core.c optional vt dev/vt/vt_cpulogos.c optional vt splash dev/vt/vt_font.c optional vt dev/vt/vt_sysmouse.c optional vt dev/vte/if_vte.c optional vte pci dev/watchdog/watchdog.c standard dev/wg/if_wg.c optional wg \ compile-with "${NORMAL_C} -include $S/dev/wg/compat.h" dev/wg/wg_cookie.c optional wg \ compile-with "${NORMAL_C} -include $S/dev/wg/compat.h" dev/wg/wg_crypto.c 
optional wg \ compile-with "${NORMAL_C} -include $S/dev/wg/compat.h" dev/wg/wg_noise.c optional wg \ compile-with "${NORMAL_C} -include $S/dev/wg/compat.h" dev/wpi/if_wpi.c optional wpi pci wpifw.c optional wpifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "wpifw.c" wpifw.fwo optional wpifw \ dependency "wpi.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "wpifw.fwo" wpi.fw optional wpifw \ dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "wpi.fw" dev/xdma/controller/pl330.c optional xdma pl330 fdt dev/xdma/xdma.c optional xdma dev/xdma/xdma_bank.c optional xdma dev/xdma/xdma_bio.c optional xdma dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt dev/xdma/xdma_if.m optional xdma dev/xdma/xdma_iommu.c optional xdma dev/xdma/xdma_mbuf.c optional xdma dev/xdma/xdma_queue.c optional xdma dev/xdma/xdma_sg.c optional xdma dev/xdma/xdma_sglist.c optional xdma dev/xen/balloon/balloon.c optional xenhvm dev/xen/blkfront/blkfront.c optional xenhvm dev/xen/blkback/blkback.c optional xenhvm dev/xen/bus/xen_intr.c optional xenhvm dev/xen/bus/xenpv.c optional xenhvm dev/xen/console/xen_console.c optional xenhvm dev/xen/control/control.c optional xenhvm dev/xen/cpu/xen_acpi_cpu.c optional xenhvm acpi dev/xen/efi/pvefi.c optional xenhvm xenefi efirt dev/xen/grant_table/grant_table.c optional xenhvm dev/xen/netback/netback.c optional xenhvm dev/xen/netfront/netfront.c optional xenhvm dev/xen/timer/xen_timer.c optional xenhvm xentimer dev/xen/xenpci/xenpci.c optional xenpci dev/xen/xenstore/xenstore.c optional xenhvm dev/xen/xenstore/xenstore_dev.c optional xenhvm dev/xen/xenstore/xenstored_dev.c optional xenhvm dev/xen/evtchn/evtchn_dev.c optional xenhvm dev/xen/privcmd/privcmd.c optional xenhvm dev/xen/gntdev/gntdev.c optional xenhvm dev/xen/debug/debug.c optional xenhvm dev/xl/if_xl.c 
optional xl pci dev/xl/xlphy.c optional xl pci fs/autofs/autofs.c optional autofs fs/autofs/autofs_vfsops.c optional autofs fs/autofs/autofs_vnops.c optional autofs fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_dir.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/cuse/cuse.c optional cuse fs/fuse/fuse_device.c optional fusefs fs/fuse/fuse_file.c optional fusefs fs/fuse/fuse_internal.c optional fusefs fs/fuse/fuse_io.c optional fusefs fs/fuse/fuse_ipc.c optional fusefs fs/fuse/fuse_main.c optional fusefs fs/fuse/fuse_node.c optional fusefs fs/fuse/fuse_vfsops.c optional fusefs fs/fuse/fuse_vnops.c optional fusefs fs/mntfs/mntfs_vnops.c standard fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/nfs/nfs_commonkrpc.c optional nfscl | nfslockd | nfsd fs/nfs/nfs_commonsubs.c optional nfscl | nfslockd | nfsd fs/nfs/nfs_commonport.c optional nfscl | nfslockd | nfsd fs/nfs/nfs_commonacl.c optional nfscl | nfslockd | nfsd fs/nfsclient/nfs_clcomsubs.c optional nfscl fs/nfsclient/nfs_clsubs.c optional nfscl fs/nfsclient/nfs_clstate.c optional nfscl fs/nfsclient/nfs_clkrpc.c optional nfscl fs/nfsclient/nfs_clrpcops.c optional nfscl fs/nfsclient/nfs_clvnops.c optional nfscl fs/nfsclient/nfs_clnode.c optional nfscl fs/nfsclient/nfs_clvfsops.c optional nfscl fs/nfsclient/nfs_clport.c optional nfscl fs/nfsclient/nfs_clbio.c optional nfscl fs/nfsclient/nfs_clnfsiod.c optional nfscl fs/nfsserver/nfs_fha_new.c optional nfsd inet fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet fs/nfsserver/nfs_nfsdsubs.c optional 
nfsd inet fs/nfsserver/nfs_nfsdstate.c optional nfsd inet fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet fs/nfsserver/nfs_nfsdserv.c optional nfsd inet fs/nfsserver/nfs_nfsdport.c optional nfsd inet fs/nfsserver/nfs_nfsdcache.c optional nfsd inet fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_osrel.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/tarfs/tarfs_io.c optional tarfs compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd" fs/tarfs/tarfs_subr.c optional tarfs fs/tarfs/tarfs_vfsops.c optional tarfs fs/tarfs/tarfs_vnops.c optional tarfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb gdb/netgdb.c optional ddb debugnet gdb netgdb inet geom/bde/g_bde.c optional 
geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_hmac.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_key_cache.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_bsd_enc.c optional geom_part_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_flashmap.c optional fdt cfi | fdt mx25l | mmcsd | fdt n25q | fdt at45d geom/geom_io.c standard geom/geom_kern.c standard geom/geom_map.c optional geom_map geom/geom_redboot.c optional geom_redboot geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_vfs.c standard geom/journal/g_journal.c optional geom_journal geom/journal/g_journal_ufs.c optional geom_journal geom/label/g_label.c optional geom_label | geom_label_gpt geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_flashmap.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c optional geom_label geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/label/g_label_gpt.c optional geom_label | geom_label_gpt geom/label/g_label_disk_ident.c optional geom_label geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/mountver/g_mountver.c optional geom_mountver geom/multipath/g_multipath.c optional geom_multipath geom/nop/g_nop.c optional 
geom_nop geom/part/g_part.c standard geom/part/g_part_if.m standard geom/part/g_part_apm.c optional geom_part_apm geom/part/g_part_bsd.c optional geom_part_bsd geom/part/g_part_bsd64.c optional geom_part_bsd64 geom/part/g_part_ebr.c optional geom_part_ebr geom/part/g_part_gpt.c optional geom_part_gpt geom/part/g_part_ldm.c optional geom_part_ldm geom/part/g_part_mbr.c optional geom_part_mbr geom/raid/g_raid.c optional geom_raid geom/raid/g_raid_ctl.c optional geom_raid geom/raid/g_raid_md_if.m optional geom_raid geom/raid/g_raid_tr_if.m optional geom_raid geom/raid/md_ddf.c optional geom_raid geom/raid/md_intel.c optional geom_raid geom/raid/md_jmicron.c optional geom_raid geom/raid/md_nvidia.c optional geom_raid geom/raid/md_promise.c optional geom_raid geom/raid/md_sii.c optional geom_raid geom/raid/tr_concat.c optional geom_raid geom/raid/tr_raid0.c optional geom_raid geom/raid/tr_raid1.c optional geom_raid geom/raid/tr_raid1e.c optional geom_raid geom/raid/tr_raid5.c optional geom_raid geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe geom/union/g_union.c optional geom_union geom/uzip/g_uzip.c optional geom_uzip geom/uzip/g_uzip_lzma.c optional geom_uzip geom/uzip/g_uzip_wrkthr.c optional geom_uzip geom/uzip/g_uzip_zlib.c optional geom_uzip geom/uzip/g_uzip_zstd.c optional geom_uzip zstdio \ compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd" geom/vinum/geom_vinum.c optional geom_vinum geom/vinum/geom_vinum_create.c optional geom_vinum geom/vinum/geom_vinum_drive.c optional geom_vinum geom/vinum/geom_vinum_plex.c optional geom_vinum geom/vinum/geom_vinum_volume.c optional geom_vinum geom/vinum/geom_vinum_subr.c optional geom_vinum geom/vinum/geom_vinum_raid5.c optional geom_vinum geom/vinum/geom_vinum_share.c optional geom_vinum geom/vinum/geom_vinum_list.c optional geom_vinum geom/vinum/geom_vinum_rm.c optional geom_vinum 
geom/vinum/geom_vinum_init.c optional geom_vinum geom/vinum/geom_vinum_state.c optional geom_vinum geom/vinum/geom_vinum_rename.c optional geom_vinum geom/vinum/geom_vinum_move.c optional geom_vinum geom/vinum/geom_vinum_events.c optional geom_vinum geom/virstor/binstream.c optional geom_virstor geom/virstor/g_virstor.c optional geom_virstor geom/virstor/g_virstor_md.c optional geom_virstor geom/zero/g_zero.c optional geom_zero fs/ext2fs/ext2_acl.c optional ext2fs fs/ext2fs/ext2_alloc.c optional ext2fs fs/ext2fs/ext2_balloc.c optional ext2fs fs/ext2fs/ext2_bmap.c optional ext2fs fs/ext2fs/ext2_csum.c optional ext2fs fs/ext2fs/ext2_extattr.c optional ext2fs fs/ext2fs/ext2_extents.c optional ext2fs fs/ext2fs/ext2_inode.c optional ext2fs fs/ext2fs/ext2_inode_cnv.c optional ext2fs fs/ext2fs/ext2_hash.c optional ext2fs fs/ext2fs/ext2_htree.c optional ext2fs fs/ext2fs/ext2_lookup.c optional ext2fs fs/ext2fs/ext2_subr.c optional ext2fs fs/ext2fs/ext2_vfsops.c optional ext2fs fs/ext2fs/ext2_vnops.c optional ext2fs # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp fs/cd9660/cd9660_bmap.c optional cd9660 fs/cd9660/cd9660_lookup.c optional cd9660 fs/cd9660/cd9660_node.c optional cd9660 fs/cd9660/cd9660_rrip.c optional cd9660 fs/cd9660/cd9660_util.c optional cd9660 fs/cd9660/cd9660_vfsops.c optional cd9660 fs/cd9660/cd9660_vnops.c optional cd9660 fs/cd9660/cd9660_iconv.c optional cd9660_iconv gnu/gcov/gcc_4_7.c optional gcov \ warning "kernel contains GPL licensed gcov support" gnu/gcov/gcov_fs.c optional gcov lindebugfs \ compile-with "${LINUXKPI_C}" gnu/gcov/gcov_subr.c optional gcov kern/bus_if.m standard kern/clock_if.m standard kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_binmisc.c optional imgact_binmisc kern/imgact_elf.c standard kern/imgact_elf32.c optional compat_freebsd32 kern/imgact_shell.c standard kern/init_main.c standard kern/init_sysent.c standard 
kern/ksched.c optional _kposix_priority_scheduling kern/kern_acct.c standard kern/kern_alq.c optional alq kern/kern_boottrace.c standard kern/kern_clock.c standard kern/kern_clocksource.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cons.c standard kern/kern_cpu.c standard kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_devctl.c standard kern/kern_dtrace.c optional kdtrace_hooks kern/kern_dump.c standard kern/kern_environment.c standard kern/kern_et.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fail.c standard kern/kern_ffclock.c standard kern/kern_fork.c standard kern/kern_hhook.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_kcov.c optional kcov \ compile-with "${NORMAL_C:N-fsanitize*} ${NORMAL_C:M-fsanitize=kernel-memory}" kern/kern_khelp.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_lockstat.c optional kdtrace_hooks kern/kern_loginclass.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard kern/kern_membarrier.c standard kern/kern_mib.c standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_osd.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_priv.c standard kern/kern_proc.c standard kern/kern_procctl.c standard kern/kern_prot.c standard kern/kern_racct.c optional racct kern/kern_rangelock.c standard kern/kern_rctl.c standard kern/kern_resource.c standard kern/kern_rmlock.c standard kern/kern_rwlock.c standard kern/kern_sdt.c optional kdtrace_hooks kern/kern_sema.c standard kern/kern_sendfile.c standard kern/kern_sharedpage.c standard kern/kern_shutdown.c standard kern/kern_sig.c standard 
kern/kern_switch.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_tslog.c optional tslog kern/kern_ubsan.c optional kubsan kern/kern_umtx.c standard kern/kern_uuid.c standard kern/kern_vnodedumper.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/linker_if.m standard kern/md4c.c optional netsmb kern/md5c.c standard kern/p1003_1b.c standard kern/posix4_mib.c standard kern/sched_4bsd.c optional sched_4bsd kern/sched_ule.c optional sched_ule kern/serdev_if.m standard kern/stack_protector.c standard \ compile-with "${NORMAL_C:N-fstack-protector*}" kern/subr_acl_nfs4.c optional ufs_acl | zfs kern/subr_acl_posix1e.c optional ufs_acl kern/subr_asan.c optional kasan \ compile-with "${NORMAL_C:N-fsanitize*:N-fstack-protector*}" kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_boot.c standard kern/subr_bus.c standard kern/subr_bus_dma.c standard kern/subr_bufring.c standard kern/subr_capability.c standard kern/subr_clock.c standard kern/subr_compressor.c standard \ compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd" kern/subr_coverage.c optional coverage \ compile-with "${NORMAL_C:N-fsanitize*}" kern/subr_counter.c standard kern/subr_csan.c optional kcsan \ compile-with "${NORMAL_C:N-fsanitize*:N-fstack-protector*}" kern/subr_devstat.c standard kern/subr_disk.c standard kern/subr_early.c standard kern/subr_epoch.c standard kern/subr_eventhandler.c standard kern/subr_fattime.c standard kern/subr_firmware.c optional firmware kern/subr_filter.c standard kern/subr_gtaskqueue.c standard kern/subr_hash.c standard kern/subr_hints.c standard kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard kern/subr_log.c standard kern/subr_mchain.c optional libmchain kern/subr_memdesc.c standard kern/subr_module.c standard 
kern/subr_msan.c optional kmsan \ compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*:N-fstack-protector*}" kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_pctrie.c standard kern/subr_pidctrl.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prng.c standard kern/subr_prof.c standard kern/subr_rangeset.c standard kern/subr_rman.c standard kern/subr_rtc.c standard kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sglist.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_smr.c standard kern/subr_stack.c optional ddb | stack | ktr kern/subr_stats.c optional stats kern/subr_taskqueue.c standard kern/subr_terminal.c optional vt kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_uio.c standard kern/subr_unit.c standard kern/subr_vmem.c standard kern/subr_witness.c optional witness kern/sys_capability.c standard kern/sys_eventfd.c standard kern/sys_generic.c standard kern/sys_getrandom.c standard kern/sys_pipe.c standard kern/sys_procdesc.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/sys_timerfd.c standard kern/syscalls.c standard kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_info.c standard kern/tty_inq.c standard kern/tty_outq.c standard kern/tty_pts.c standard kern/tty_tty.c standard kern/tty_ttydisc.c standard kern/uipc_accf.c standard kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_ktls.c optional kern_tls kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mbufhash.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_shm.c standard kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c 
standard kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_mountroot.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # Kernel GSS-API # gssd.h optional kgssapi \ dependency "$S/kgssapi/gssd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \ no-obj no-implicit-rule before-depend local \ clean "gssd.h" gssd_xdr.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \ no-ctfconvert no-implicit-rule before-depend local \ clean "gssd_xdr.c" gssd_clnt.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \ no-ctfconvert no-implicit-rule before-depend local \ clean "gssd_clnt.c" kgssapi/gss_accept_sec_context.c optional kgssapi kgssapi/gss_add_oid_set_member.c optional kgssapi kgssapi/gss_acquire_cred.c optional kgssapi kgssapi/gss_canonicalize_name.c optional kgssapi kgssapi/gss_create_empty_oid_set.c optional kgssapi kgssapi/gss_delete_sec_context.c optional kgssapi kgssapi/gss_display_status.c optional kgssapi kgssapi/gss_export_name.c optional kgssapi kgssapi/gss_get_mic.c optional kgssapi kgssapi/gss_init_sec_context.c optional kgssapi kgssapi/gss_impl.c optional kgssapi kgssapi/gss_import_name.c optional kgssapi kgssapi/gss_ip_to_dns.c optional kgssapi kgssapi/gss_names.c optional kgssapi kgssapi/gss_pname_to_uid.c optional kgssapi kgssapi/gss_release_buffer.c optional kgssapi kgssapi/gss_release_cred.c optional kgssapi kgssapi/gss_release_name.c optional kgssapi kgssapi/gss_release_oid_set.c optional kgssapi kgssapi/gss_set_cred_option.c optional kgssapi 
kgssapi/gss_test_oid_set_member.c optional kgssapi kgssapi/gss_unwrap.c optional kgssapi kgssapi/gss_verify_mic.c optional kgssapi kgssapi/gss_wrap.c optional kgssapi kgssapi/gss_wrap_size_limit.c optional kgssapi kgssapi/gssd_prot.c optional kgssapi kgssapi/krb5/krb5_mech.c optional kgssapi kgssapi/krb5/kcrypto.c optional kgssapi kgssapi/krb5/kcrypto_aes.c optional kgssapi kgssapi/kgss_if.m optional kgssapi kgssapi/gsstest.c optional kgssapi_debug # These files in libkern/ are those needed by all architectures. Some # of the files in libkern/ are only needed on some architectures, e.g., # libkern/divdi3.c is needed by i386 but not alpha. Also, some of these # routines may be optimized for a particular platform. In either case, # the file should be moved to conf/files.<arch> from here. # libkern/arc4random.c standard libkern/arc4random_uniform.c standard libkern/asprintf.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc16.c standard libkern/explicit_bzero.c standard libkern/fnmatch.c standard libkern/gsb_crc32.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_ucs.c optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/inet_aton.c standard libkern/inet_ntoa.c standard libkern/inet_ntop.c standard libkern/inet_pton.c standard libkern/jenkins_hash.c standard libkern/murmur3_32.c standard libkern/memcchr.c standard libkern/memchr.c standard libkern/memmem.c optional gdb libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/scanc.c standard libkern/strcasecmp.c standard libkern/strcasestr.c standard libkern/strcat.c standard libkern/strchr.c standard libkern/strchrnul.c standard libkern/strcpy.c standard libkern/strcspn.c standard libkern/strdup.c standard libkern/strndup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strncat.c standard libkern/strncpy.c standard libkern/strnlen.c standard 
libkern/strnstr.c standard libkern/strrchr.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard libkern/timingsafe_bcmp.c standard contrib/zlib/adler32.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/compress.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/crc32.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/deflate.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/inffast.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/inflate.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/inftrees.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/trees.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/uncompr.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" contrib/zlib/zutil.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib \ compile-with "${ZLIB_C}" dev/zlib/zlib_mod.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib dev/zlib/zcalloc.c optional crypto | geom_uzip | \ mxge | ddb_ctf | gzio | zfs | zlib net/altq/altq_cbq.c optional altq net/altq/altq_codel.c optional altq net/altq/altq_hfsc.c optional altq net/altq/altq_fairq.c optional altq net/altq/altq_priq.c optional altq net/altq/altq_red.c optional altq net/altq/altq_rio.c optional altq net/altq/altq_rmclass.c optional altq net/altq/altq_subr.c optional altq net/bpf.c standard net/bpf_buffer.c 
optional bpf net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bpf_zerocopy.c optional bpf net/bridgestp.c optional bridge | if_bridge net/ieee8023ad_lacp.c optional lagg net/if.c standard net/ifq.c standard net/if_bridge.c optional bridge inet | if_bridge inet net/if_clone.c standard net/if_dead.c standard net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_enc.c optional enc inet | enc inet6 net/if_epair.c optional epair net/if_ethersubr.c optional ether net/if_fwsubr.c optional fwip net/if_gif.c optional gif inet | gif inet6 | \ netgraph_gif inet | netgraph_gif inet6 net/if_gre.c optional gre inet | gre inet6 net/if_ipsec.c optional inet ipsec | inet6 ipsec net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_llatbl.c standard net/if_me.c optional me inet net/if_media.c standard net/if_mib.c standard net/if_ovpn.c optional ovpn inet | ovpn inet6 net/if_stf.c optional stf inet inet6 net/if_tuntap.c optional tuntap net/if_vlan.c optional vlan net/if_vxlan.c optional vxlan inet | vxlan inet6 net/ifdi_if.m optional ether pci iflib net/iflib.c optional ether pci iflib net/mp_ring.c optional ether iflib net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard net/debugnet.c optional inet debugnet net/debugnet_inet.c optional inet debugnet net/pfil.c optional ether | inet net/radix.c standard net/route.c standard net/route/nhgrp.c optional route_mpath net/route/nhgrp_ctl.c optional route_mpath net/route/nhop.c standard net/route/nhop_ctl.c standard net/route/nhop_utils.c standard net/route/fib_algo.c optional fib_algo net/route/route_ctl.c standard net/route/route_ddb.c optional ddb net/route/route_helpers.c standard net/route/route_ifaddrs.c standard net/route/route_rtentry.c standard net/route/route_subscription.c standard net/route/route_tables.c standard net/route/route_temporal.c standard net/rss_config.c optional inet rss | inet6 rss net/rtsock.c 
standard net/slcompress.c optional netgraph_vjc net/toeplitz.c optional inet rss | inet6 rss | route_mpath net/vnet.c optional vimage net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan wlan_acl net80211/ieee80211_action.c optional wlan net80211/ieee80211_adhoc.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_amrr.c optional wlan | wlan_amrr net80211/ieee80211_crypto.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan wlan_wep net80211/ieee80211_ddb.c optional wlan ddb net80211/ieee80211_dfs.c optional wlan net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_hostap.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ht.c optional wlan net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_monitor.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_phy.c optional wlan net80211/ieee80211_power.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_radiotap.c optional wlan net80211/ieee80211_ratectl.c optional wlan net80211/ieee80211_ratectl_none.c optional wlan net80211/ieee80211_regdomain.c optional wlan net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt net80211/ieee80211_scan.c optional wlan net80211/ieee80211_scan_sta.c optional wlan net80211/ieee80211_sta.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_superg.c optional wlan ieee80211_support_superg 
net80211/ieee80211_scan_sw.c optional wlan net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma net80211/ieee80211_vht.c optional wlan net80211/ieee80211_wds.c optional wlan net80211/ieee80211_xauth.c optional wlan wlan_xauth net80211/ieee80211_alq.c optional wlan ieee80211_alq netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c optional netgraph_bluetooth_ubt usb netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/netflow_v9.c optional netgraph_netflow 
netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_checksum.c optional netgraph_checksum netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_ether_echo.c optional netgraph_ether_echo netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_macfilter.c optional netgraph_macfilter netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat inet libalias netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_patch.c optional netgraph_patch netgraph/ng_pipe.c optional netgraph_pipe netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c 
optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netgraph/ng_vlan.c optional netgraph_vlan netgraph/ng_vlan_rotate.c optional netgraph_vlan_rotate netinet/accf_data.c optional accept_filter_data inet netinet/accf_dns.c optional accept_filter_dns inet netinet/accf_http.c optional accept_filter_http inet netinet/if_ether.c optional inet ether netinet/igmp.c optional inet netinet/in.c optional inet netinet/in_cksum.c optional inet | inet6 netinet/in_debug.c optional inet ddb netinet/in_kdtrace.c optional inet | inet6 netinet/ip_carp.c optional inet carp | inet6 carp netinet/in_fib.c optional inet netinet/in_fib_algo.c optional inet fib_algo netinet/in_gif.c optional gif inet | netgraph_gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_jail.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet | inet6 netinet/in_prot.c optional inet | inet6 netinet/in_proto.c optional inet | inet6 netinet/in_rmx.c optional inet netinet/in_rss.c optional inet rss netinet/ip_divert.c optional ipdivert inet | ipdivert inet6 netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_icmp.c optional inet | inet6 netinet/ip_input.c optional inet netinet/ip_mroute.c optional mrouting inet netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/ip_reass.c optional inet netinet/raw_ip.c optional inet | inet6 netinet/cc/cc.c optional cc_newreno inet | cc_vegas inet | \ cc_htcp inet | cc_hd inet | cc_dctcp inet | cc_cubic inet | \ cc_chd inet | cc_cdg inet | cc_newreno inet6 | cc_vegas inet6 | \ cc_htcp inet6 | cc_hd inet6 | cc_dctcp inet6 | cc_cubic inet6 | \ cc_chd inet6 | cc_cdg inet6 netinet/cc/cc_cdg.c optional inet cc_cdg tcp_hhook netinet/cc/cc_chd.c optional inet cc_chd tcp_hhook netinet/cc/cc_cubic.c optional inet cc_cubic | inet6 cc_cubic netinet/cc/cc_dctcp.c optional inet cc_dctcp | inet6 cc_dctcp netinet/cc/cc_hd.c optional 
inet cc_hd tcp_hhook netinet/cc/cc_htcp.c optional inet cc_htcp | inet6 cc_htcp netinet/cc/cc_newreno.c optional inet cc_newreno | inet6 cc_newreno netinet/cc/cc_vegas.c optional inet cc_vegas tcp_hhook netinet/khelp/h_ertt.c optional inet tcp_hhook netinet/sctp_asconf.c optional inet sctp | inet6 sctp netinet/sctp_auth.c optional inet sctp | inet6 sctp netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp netinet/sctp_crc32.c optional inet | inet6 netinet/sctp_indata.c optional inet sctp | inet6 sctp netinet/sctp_input.c optional inet sctp | inet6 sctp netinet/sctp_kdtrace.c optional inet sctp | inet6 sctp netinet/sctp_module.c optional inet sctp | inet6 sctp netinet/sctp_output.c optional inet sctp | inet6 sctp netinet/sctp_pcb.c optional inet sctp | inet6 sctp netinet/sctp_peeloff.c optional inet sctp | inet6 sctp netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp netinet/sctp_syscalls.c optional inet sctp | inet6 sctp netinet/sctp_sysctl.c optional inet sctp | inet6 sctp netinet/sctp_timer.c optional inet sctp | inet6 sctp netinet/sctp_usrreq.c optional inet sctp | inet6 sctp netinet/sctputil.c optional inet sctp | inet6 sctp netinet/siftr.c optional inet siftr alq | inet6 siftr alq netinet/tcp_ecn.c optional inet | inet6 netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413 netinet/tcp_hostcache.c optional inet | inet6 netinet/tcp_input.c optional inet | inet6 netinet/tcp_log_buf.c optional tcp_blackbox inet | tcp_blackbox inet6 netinet/tcp_lro.c optional inet | inet6 netinet/tcp_lro_hpts.c optional tcphpts inet | tcphpts inet6 netinet/tcp_output.c optional inet | inet6 netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6 netinet/tcp_hpts.c optional tcphpts inet | tcphpts inet6 netinet/tcp_ratelimit.c optional ratelimit inet | ratelimit inet6 netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap \ compile-with "${NORMAL_C} ${NO_WNONNULL}" netinet/tcp_reass.c 
optional inet | inet6 netinet/tcp_sack.c optional inet | inet6 netinet/tcp_stacks/bbr.c optional inet tcphpts tcp_bbr | inet6 tcphpts tcp_bbr \ compile-with "${NORMAL_C} -DMODNAME=tcp_bbr -DSTACKNAME=bbr" netinet/tcp_stacks/rack.c optional inet tcphpts tcp_rack | inet6 tcphpts tcp_rack \ compile-with "${NORMAL_C} -DMODNAME=tcp_rack -DSTACKNAME=rack" netinet/tcp_stacks/rack_bbr_common.c optional inet tcphpts tcp_bbr | inet tcphpts tcp_rack | inet6 tcphpts tcp_bbr | inet6 tcphpts tcp_rack netinet/tcp_stacks/sack_filter.c optional inet tcphpts tcp_bbr | inet tcphpts tcp_rack | inet6 tcphpts tcp_bbr | inet6 tcphpts tcp_rack netinet/tcp_stacks/tailq_hash.c optional inet tcphpts tcp_bbr | inet tcphpts tcp_rack | inet6 tcphpts tcp_bbr | inet6 tcphpts tcp_rack netinet/tcp_stats.c optional stats inet | stats inet6 netinet/tcp_subr.c optional inet | inet6 netinet/tcp_syncache.c optional inet | inet6 netinet/tcp_timer.c optional inet | inet6 netinet/tcp_timewait.c optional inet | inet6 netinet/tcp_usrreq.c optional inet | inet6 netinet/udp_usrreq.c optional inet | inet6 netinet/libalias/alias.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet netinet/netdump/netdump_client.c optional inet debugnet netdump netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_fib.c optional inet6 netinet6/in6_fib_algo.c optional inet6 fib_algo netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_jail.c optional inet6 netinet6/in6_mcast.c optional inet6 netinet6/in6_pcb.c 
optional inet6 netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_rss.c optional inet6 rss netinet6/in6_src.c optional inet6 netinet6/ip6_fastfwd.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_gre.c optional gre inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec inet | ipsec inet6 netipsec/ipsec_input.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mod.c optional ipsec inet | ipsec inet6 netipsec/ipsec_output.c optional ipsec inet | ipsec inet6 netipsec/ipsec_pcb.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key_debug.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/keysock.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/subr_ipsec.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/udpencap.c optional ipsec inet netipsec/xform_ah.c optional ipsec inet | ipsec inet6 netipsec/xform_esp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6 netipsec/xform_tcp.c optional ipsec inet tcp_signature | \ ipsec inet6 tcp_signature | ipsec_support inet tcp_signature | \ ipsec_support inet6 tcp_signature netlink/netlink_generic_kpi.c standard netlink/netlink_glue.c standard netlink/netlink_message_parser.c standard 
netlink/netlink_domain.c optional netlink netlink/netlink_generic.c optional netlink netlink/netlink_io.c optional netlink netlink/netlink_message_writer.c optional netlink netlink/netlink_module.c optional netlink netlink/netlink_route.c optional netlink netlink/route/iface_drivers.c optional netlink netlink/route/iface.c optional netlink netlink/route/neigh.c optional netlink netlink/route/nexthop.c optional netlink netlink/route/rt.c optional netlink netpfil/ipfw/dn_aqm_codel.c optional inet dummynet netpfil/ipfw/dn_aqm_pie.c optional inet dummynet netpfil/ipfw/dn_heap.c optional inet dummynet netpfil/ipfw/dn_sched_fifo.c optional inet dummynet netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet netpfil/ipfw/dn_sched_prio.c optional inet dummynet netpfil/ipfw/dn_sched_qfq.c optional inet dummynet netpfil/ipfw/dn_sched_rr.c optional inet dummynet netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet netpfil/ipfw/ip_dummynet.c optional inet dummynet netpfil/ipfw/ip_dn_io.c optional inet dummynet netpfil/ipfw/ip_dn_glue.c optional inet dummynet netpfil/ipfw/ip_fw2.c optional inet ipfirewall netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall \ compile-with "${NORMAL_C} -I$S/contrib/ck/include" netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall netpfil/ipfw/ip_fw_log.c optional inet ipfirewall netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall netpfil/ipfw/ip_fw_table.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64clat.c optional inet inet6 ipfirewall \ ipfirewall_nat64 
netpfil/ipfw/nat64/nat64clat_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \ ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include" netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include" netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/pmod/ip_fw_pmod.c optional inet ipfirewall_pmod netpfil/ipfw/pmod/tcpmod.c optional inet ipfirewall_pmod netpfil/pf/if_pflog.c optional pflog pf inet netpfil/pf/if_pfsync.c optional pfsync pf inet netpfil/pf/pf.c optional pf inet netpfil/pf/pf_if.c optional pf inet netpfil/pf/pf_ioctl.c optional pf inet netpfil/pf/pf_lb.c optional pf inet netpfil/pf/pf_norm.c optional pf inet netpfil/pf/pf_nl.c optional pf inet netpfil/pf/pf_nv.c optional pf inet netpfil/pf/pf_osfp.c optional pf inet netpfil/pf/pf_ruleset.c optional pf inet netpfil/pf/pf_syncookies.c optional pf inet netpfil/pf/pf_table.c optional pf inet netpfil/pf/pfsync_nv.c optional pfsync pf inet netpfil/pf/in4_cksum.c optional pf inet netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/bootp_subr.c optional bootp nfscl nfs/krpc_subr.c optional bootp nfscl nfs/nfs_diskless.c optional nfscl nfs_root nfs/nfs_nfssvc.c optional nfscl | nfslockd | nfsd nlm/nlm_advlock.c optional nfslockd | 
nfsd nlm/nlm_prot_clnt.c optional nfslockd | nfsd nlm/nlm_prot_impl.c optional nfslockd | nfsd nlm/nlm_prot_server.c optional nfslockd | nfsd nlm/nlm_prot_svc.c optional nfslockd | nfsd nlm/nlm_prot_xdr.c optional nfslockd | nfsd nlm/sm_inter_xdr.c optional nfslockd | nfsd # Linux Kernel Programming Interface compat/linuxkpi/common/src/linux_80211.c optional compat_linuxkpi wlan \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_80211_macops.c optional compat_linuxkpi wlan \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_acpi.c optional compat_linuxkpi acpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_current.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_devres.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_dmi.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_domain.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_firmware.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_fpu.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_hrtimer.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_i2c.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_i2cbb.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_interrupt.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_kobject.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_kthread.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" 
compat/linuxkpi/common/src/linux_lock.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_mhi.c optional compat_linuxkpi wlan \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_netdev.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_page.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_tasklet.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C} -I$S/contrib/ck/include" compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_shmemfs.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_shrinker.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_skbuff.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_work.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_xarray.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/lkpi_iic_if.m optional compat_linuxkpi compat/linuxkpi/common/src/linux_seq_file.c optional compat_linuxkpi | lindebugfs \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_simple_attr.c optional compat_linuxkpi | lindebugfs \ compile-with "${LINUXKPI_C}" 
compat/lindebugfs/lindebugfs.c optional lindebugfs \ compile-with "${LINUXKPI_C}" # OpenFabrics Enterprise Distribution (Infiniband) net/if_infiniband.c optional ofed | lagg ofed/drivers/infiniband/core/ib_addr.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_agent.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cache.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_core_uverbs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cq.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_device.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_fmr_pool.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwcm.c optional ofed \ compile-with "${OFED_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" ofed/drivers/infiniband/core/ib_iwpm_msg.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwpm_util.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad_rmpp.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_multicast.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_packer.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_rdma_core.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sa_query.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_smi.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sysfs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ucm.c optional ofed \ compile-with 
"${OFED_C}" ofed/drivers/infiniband/core/ib_ucma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ud_header.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_umem.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_user_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_cmd.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_ioctl.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_main.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_marshall.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_async_fd.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_counters.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_cq.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_device.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_dm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_flow_action.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_std_types_mr.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_uapi.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_verbs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \ compile-with "${OFED_C} 
-I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}" ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/irdma/icrdma.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_cm.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_ctrl.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_hmc.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_hw.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/icrdma_hw.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/fbsd_kcompat.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_kcompat.c 
optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_pble.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_puda.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_uda.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_uk.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_utils.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_verbs.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/irdma/irdma_ws.c optional irdma ice inet inet6 pci ofed \ compile-with "${OFED_C} -I$S/dev/ice/" dev/mthca/mthca_allocator.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_av.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_catas.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_cmd.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_cq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_eq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mad.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_main.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mcg.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_memfree.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mr.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_pd.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_profile.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_provider.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_qp.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_reset.c optional mthca pci ofed \ compile-with "${OFED_C}" 
dev/mthca/mthca_srq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_uar.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw_qos.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \ compile-with "${OFED_C}" 
dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_ah.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_cong.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_devx.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" 
dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mem.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_diag_cnt.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_eswitch.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fc_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_counters.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_tcp.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fwdump.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mpfs.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mr.c 
optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pagealloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_rl.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_tls.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_lib/mlx5_gid.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_dim.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_hw_tls.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_iq.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_rl.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 
\ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_port_buffer.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" # crypto support opencrypto/cbc_mac.c optional crypto opencrypto/criov.c optional crypto opencrypto/crypto.c optional crypto opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto opencrypto/cryptosoft.c optional crypto opencrypto/cryptodeflate.c optional crypto opencrypto/gmac.c optional crypto opencrypto/gfmult.c optional crypto opencrypto/ktls_ocf.c optional kern_tls opencrypto/rmd160.c optional crypto opencrypto/xform_aes_cbc.c optional crypto opencrypto/xform_aes_icm.c optional crypto opencrypto/xform_aes_xts.c optional crypto opencrypto/xform_cbc_mac.c optional crypto opencrypto/xform_chacha20_poly1305.c optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium" opencrypto/xform_cml.c optional crypto opencrypto/xform_deflate.c optional crypto opencrypto/xform_gmac.c optional crypto opencrypto/xform_null.c optional crypto opencrypto/xform_poly1305.c optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium" opencrypto/xform_rmd160.c optional crypto opencrypto/xform_sha1.c optional crypto opencrypto/xform_sha2.c optional crypto contrib/libsodium/src/libsodium/crypto_core/ed25519/ref10/ed25519_ref10.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function" contrib/libsodium/src/libsodium/crypto_core/hchacha20/core_hchacha20.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/onetimeauth_poly1305.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" 
contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/donna/poly1305_donna.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/scalarmult_curve25519.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/ref10/x25519_ref10.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function" contrib/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" contrib/libsodium/src/libsodium/crypto_stream/chacha20/ref/chacha20_ref.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" contrib/libsodium/src/libsodium/crypto_verify/sodium/verify.c \ optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium" crypto/libsodium/randombytes.c optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium" crypto/libsodium/utils.c optional crypto \ compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium" rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd rpc/replay.c optional krpc | nfslockd | nfscl | nfsd 
rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/svc.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd # # Kernel RPC-over-TLS # rpctlscd.h optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlscd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v pthread.h > rpctlscd.h" \ no-obj no-implicit-rule before-depend local \ clean "rpctlscd.h" rpctlscd_xdr.c optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlscd.x -o rpctlscd_xdr.c" no-ctfconvert \ no-implicit-rule before-depend local \ clean "rpctlscd_xdr.c" rpctlscd_clnt.c optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v string.h > rpctlscd_clnt.c" no-ctfconvert \ no-implicit-rule before-depend local \ clean "rpctlscd_clnt.c" rpctlssd.h optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlssd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v pthread.h > rpctlssd.h" \ no-obj no-implicit-rule before-depend local \ clean "rpctlssd.h" rpctlssd_xdr.c optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlssd.x -o rpctlssd_xdr.c" no-ctfconvert \ 
no-implicit-rule before-depend local \ clean "rpctlssd_xdr.c" rpctlssd_clnt.c optional krpc | nfslockd | nfscl | nfsd \ dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v string.h > rpctlssd_clnt.c" no-ctfconvert \ no-implicit-rule before-depend local \ clean "rpctlssd_clnt.c" rpc/rpcsec_tls/rpctls_impl.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcsec_tls/auth_tls.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_db.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_dtrace.c optional dtaudit audit | dtraceall audit compile-with "${CDDL_C}" security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/audit/bsm_domain.c optional audit security/audit/bsm_errno.c optional audit security/audit/bsm_fcntl.c optional audit security/audit/bsm_socket_type.c optional audit security/audit/bsm_token.c optional audit security/mac/mac_audit.c optional mac audit security/mac/mac_cred.c optional mac security/mac/mac_kdb.c optional mac security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet | mac inet6 security/mac/mac_inet6.c optional mac inet6 
security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_posix_shm.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_ddb/mac_ddb.c optional mac_ddb security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_ipacl/mac_ipacl.c optional mac_ipacl security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_ntpd/mac_ntpd.c optional mac_ntpd security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_priority/mac_priority.c optional mac_priority security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test security/mac_grantbylabel/mac_grantbylabel.c optional mac_grantbylabel security/mac_veriexec/mac_veriexec.c optional mac_veriexec security/mac_veriexec/veriexec_fingerprint.c optional mac_veriexec security/mac_veriexec/veriexec_metadata.c optional mac_veriexec security/mac_veriexec_parser/mac_veriexec_parser.c optional mac_veriexec mac_veriexec_parser security/mac_veriexec/mac_veriexec_rmd160.c optional mac_veriexec_rmd160 security/mac_veriexec/mac_veriexec_sha1.c optional mac_veriexec_sha1 
security/mac_veriexec/mac_veriexec_sha256.c optional mac_veriexec_sha256 security/mac_veriexec/mac_veriexec_sha384.c optional mac_veriexec_sha384 security/mac_veriexec/mac_veriexec_sha512.c optional mac_veriexec_sha512 teken/teken.c optional sc !SC_NO_TERM_TEKEN | vt ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs | geom_label ufs/ffs/ffs_tables.c optional ffs | geom_label ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional ffs directio ufs/ffs/ffs_suspend.c optional ffs ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/sg_pager.c standard vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_domainset.c standard vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pager.c standard vm/vm_phys.c standard vm/vm_radix.c standard vm/vm_reserv.c standard vm/vm_swapout.c optional !NO_SWAPPING vm/vm_swapout_dummy.c optional NO_SWAPPING vm/vm_unix.c standard vm/vnode_pager.c standard xen/features.c optional xenhvm xen/xen_common.c optional xenhvm xen/xenbus/xenbus_if.m optional xenhvm xen/xenbus/xenbus.c optional xenhvm xen/xenbus/xenbusb_if.m optional xenhvm xen/xenbus/xenbusb.c optional xenhvm xen/xenbus/xenbusb_front.c optional xenhvm 
xen/xenbus/xenbusb_back.c optional xenhvm xen/xenmem/xenmem_if.m optional xenhvm xdr/xdr.c optional xdr | krpc | nfslockd | nfscl | nfsd xdr/xdr_array.c optional xdr | krpc | nfslockd | nfscl | nfsd xdr/xdr_mbuf.c optional xdr | krpc | nfslockd | nfscl | nfsd xdr/xdr_mem.c optional xdr | krpc | nfslockd | nfscl | nfsd xdr/xdr_reference.c optional xdr | krpc | nfslockd | nfscl | nfsd xdr/xdr_sizeof.c optional xdr | krpc | nfslockd | nfscl | nfsd diff --git a/sys/dev/ahci/ahci_fsl_fdt.c b/sys/dev/ahci/ahci_fsl_fdt.c index 983ef7c77ee9..4a6fbed470f4 100644 --- a/sys/dev/ahci/ahci_fsl_fdt.c +++ b/sys/dev/ahci/ahci_fsl_fdt.c @@ -1,419 +1,419 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Alstom Group * Copyright (c) 2020 Semihalf * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* AHCI controller driver for NXP QorIQ Layerscape SoCs. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #define AHCI_FSL_REG_PHY1 0xa8 #define AHCI_FSL_REG_PHY2 0xac #define AHCI_FSL_REG_PHY3 0xb0 #define AHCI_FSL_REG_PHY4 0xb4 #define AHCI_FSL_REG_PHY5 0xb8 #define AHCI_FSL_REG_AXICC 0xbc #define AHCI_FSL_REG_PTC 0xc8 #define AHCI_FSL_LS1021A_AXICC 0xc0 #define AHCI_FSL_REG_PHY1_TTA_MASK 0x0001ffff #define AHCI_FSL_REG_PHY1_SNM (1 << 17) #define AHCI_FSL_REG_PHY1_SNR (1 << 18) #define AHCI_FSL_REG_PHY1_FPR (1 << 20) #define AHCI_FSL_REG_PHY1_PBPS_LBP 0 #define AHCI_FSL_REG_PHY1_PBPS_LFTP (0x01 << 21) #define AHCI_FSL_REG_PHY1_PBPS_MFTP (0x02 << 21) #define AHCI_FSL_REG_PHY1_PBPS_HFTP (0x03 << 21) #define AHCI_FSL_REG_PHY1_PBPS_PRBS (0x04 << 21) #define AHCI_FSL_REG_PHY1_PBPS_BIST (0x05 << 21) #define AHCI_FSL_REG_PHY1_PBPE (1 << 24) #define AHCI_FSL_REG_PHY1_PBCE (1 << 25) #define AHCI_FSL_REG_PHY1_PBPNA (1 << 26) #define AHCI_FSL_REG_PHY1_STB (1 << 27) #define AHCI_FSL_REG_PHY1_PSSO (1 << 28) #define AHCI_FSL_REG_PHY1_PSS (1 << 29) #define AHCI_FSL_REG_PHY1_ERSN (1 << 30) #define AHCI_FSL_REG_PHY1_ESDF (1 << 31) #define AHCI_FSL_REG_PHY_MASK 0xff #define AHCI_FSL_PHY2_CIBGMN_SHIFT 0 #define AHCI_FSL_PHY2_CIBGMX_SHIFT 8 #define AHCI_FSL_PHY2_CIBGN_SHIFT 16 #define AHCI_FSL_PHY2_CINMP_SHIFT 24 #define AHCI_FSL_PHY3_CWBGMN_SHIFT 0 
#define AHCI_FSL_PHY3_CWBGMX_SHIFT 8 #define AHCI_FSL_PHY3_CWBGN_SHIFT 16 #define AHCI_FSL_PHY3_CWNMP_SHIFT 24 /* Only in LS1021A */ #define AHCI_FSL_PHY4_BMX_SHIFT 0 #define AHCI_FSL_PHY4_BNM_SHIFT 8 #define AHCI_FSL_PHY4_SFD_SHIFT 16 #define AHCI_FSL_PHY4_PTST_SHIFT 24 /* Only in LS1021A */ #define AHCI_FSL_PHY5_RIT_SHIFT 0 #define AHCI_FSL_PHY5_RCT_SHIFT 20 #define AHCI_FSL_REG_PTC_RXWM_MASK 0x0000007f #define AHCI_FSL_REG_PTC_ENBD (1 << 8) #define AHCI_FSL_REG_PTC_ITM (1 << 9) #define AHCI_FSL_REG_PHY1_CFG \ ((0x1fffe & AHCI_FSL_REG_PHY1_TTA_MASK) | \ AHCI_FSL_REG_PHY1_SNM | AHCI_FSL_REG_PHY1_PSS | AHCI_FSL_REG_PHY1_ESDF) #define AHCI_FSL_REG_PHY2_CFG \ ((0x1f << AHCI_FSL_PHY2_CIBGMN_SHIFT) | \ (0x4d << AHCI_FSL_PHY2_CIBGMX_SHIFT) | \ (0x18 << AHCI_FSL_PHY2_CIBGN_SHIFT) | \ (0x28 << AHCI_FSL_PHY2_CINMP_SHIFT)) #define AHCI_FSL_REG_PHY2_CFG_LS1021A \ ((0x14 << AHCI_FSL_PHY2_CIBGMN_SHIFT) | \ (0x34 << AHCI_FSL_PHY2_CIBGMX_SHIFT) | \ (0x18 << AHCI_FSL_PHY2_CIBGN_SHIFT) | \ (0x28 << AHCI_FSL_PHY2_CINMP_SHIFT)) #define AHCI_FSL_REG_PHY3_CFG \ ((0x09 << AHCI_FSL_PHY3_CWBGMN_SHIFT) | \ (0x15 << AHCI_FSL_PHY3_CWBGMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY3_CWBGN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWNMP_SHIFT)) #define AHCI_FSL_REG_PHY3_CFG_LS1021A \ ((0x06 << AHCI_FSL_PHY3_CWBGMN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWBGMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY3_CWBGN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWNMP_SHIFT)) #define AHCI_FSL_REG_PHY4_CFG_LS1021A \ ((0x0b << AHCI_FSL_PHY4_BMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY4_BNM_SHIFT) | \ (0x4a << AHCI_FSL_PHY4_SFD_SHIFT) | \ (0x06 << AHCI_FSL_PHY4_PTST_SHIFT)) #define AHCI_FSL_REG_PHY5_CFG_LS1021A \ ((0x86470 << AHCI_FSL_PHY5_RIT_SHIFT) | \ (0x2aa << AHCI_FSL_PHY5_RCT_SHIFT)) /* Bit 27 enabled so value of reserved bits remains as in documentation. 
*/ #define AHCI_FSL_REG_PTC_CFG \ ((0x29 & AHCI_FSL_REG_PTC_RXWM_MASK) | (1 << 27)) #define AHCI_FSL_REG_AXICC_CFG 0x3fffffff #define AHCI_FSL_REG_ECC 0x0 #define AHCI_FSL_REG_ECC_LS1021A 0x00020000 #define AHCI_FSL_REG_ECC_LS1043A 0x80000000 #define AHCI_FSL_REG_ECC_LS1028A 0x40000000 #define QORIQ_AHCI_LS1021A 1 #define QORIQ_AHCI_LS1028A 2 #define QORIQ_AHCI_LS1043A 3 #define QORIQ_AHCI_LS2080A 4 #define QORIQ_AHCI_LS1046A 5 #define QORIQ_AHCI_LS1088A 6 #define QORIQ_AHCI_LS2088A 7 #define QORIQ_AHCI_LX2160A 8 struct ahci_fsl_fdt_controller { struct ahci_controller ctlr; /* Must be the first field. */ int soc_type; struct resource *r_ecc; int r_ecc_rid; }; static const struct ofw_compat_data ahci_fsl_fdt_compat_data[] = { {"fsl,ls1021a-ahci", QORIQ_AHCI_LS1021A}, {"fsl,ls1028a-ahci", QORIQ_AHCI_LS1028A}, {"fsl,ls1043a-ahci", QORIQ_AHCI_LS1043A}, {"fsl,ls2080a-ahci", QORIQ_AHCI_LS2080A}, {"fsl,ls1046a-ahci", QORIQ_AHCI_LS1046A}, {"fsl,ls1088a-ahci", QORIQ_AHCI_LS1088A}, {"fsl,ls2088a-ahci", QORIQ_AHCI_LS2088A}, {"fsl,lx2160a-ahci", QORIQ_AHCI_LX2160A}, {NULL, 0} }; static bool ecc_inited; static int ahci_fsl_fdt_ecc_init(struct ahci_fsl_fdt_controller *ctrl) { uint32_t val; switch (ctrl->soc_type) { case QORIQ_AHCI_LS2080A: case QORIQ_AHCI_LS2088A: return (0); case QORIQ_AHCI_LS1021A: if (!ecc_inited && ctrl->r_ecc == NULL) return (ENXIO); if (!ecc_inited) ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC, AHCI_FSL_REG_ECC_LS1021A); break; case QORIQ_AHCI_LS1043A: case QORIQ_AHCI_LS1046A: if (!ecc_inited && ctrl->r_ecc == NULL) return (ENXIO); if (!ecc_inited) { val = ATA_INL(ctrl->r_ecc, AHCI_FSL_REG_ECC); val = AHCI_FSL_REG_ECC_LS1043A; ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC, val); } break; case QORIQ_AHCI_LS1028A: case QORIQ_AHCI_LS1088A: case QORIQ_AHCI_LX2160A: if (!ecc_inited && ctrl->r_ecc == NULL) return (ENXIO); if (!ecc_inited) { val = ATA_INL(ctrl->r_ecc, AHCI_FSL_REG_ECC); val |= AHCI_FSL_REG_ECC_LS1028A; ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC, val); } break; 
	default:
		/* Table and switch must stay in sync; a new compat entry
		 * without a case here is a driver bug. */
		panic("Unimplemented SOC type: %d", ctrl->soc_type);
	}

	/* Latch so sibling controller instances skip re-programming. */
	ecc_inited = true;
	return (0);
}

/*
 * Write the PHY and port-transport configuration registers.
 *
 * The LS1021A variant programs two extra PHY registers (PHY4/PHY5), uses
 * its own PHY2/PHY3 values, and has the AXI cache-control register at a
 * different offset (AHCI_FSL_LS1021A_AXICC); all other supported SoCs take
 * the common values.  AXICC is only written when the FDT marked the device
 * "dma-coherent".
 */
static void
ahci_fsl_fdt_phy_init(struct ahci_fsl_fdt_controller *ctrl)
{
	struct ahci_controller *ahci;

	ahci = &ctrl->ctlr;
	if (ctrl->soc_type == QORIQ_AHCI_LS1021A) {
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY1,
		    AHCI_FSL_REG_PHY1_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY2,
		    AHCI_FSL_REG_PHY2_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY3,
		    AHCI_FSL_REG_PHY3_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY4,
		    AHCI_FSL_REG_PHY4_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY5,
		    AHCI_FSL_REG_PHY5_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PTC,
		    AHCI_FSL_REG_PTC_CFG);
		if (ctrl->ctlr.dma_coherent)
			ATA_OUTL(ahci->r_mem, AHCI_FSL_LS1021A_AXICC,
			    AHCI_FSL_REG_AXICC_CFG);
	} else {
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY1,
		    AHCI_FSL_REG_PHY1_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY2,
		    AHCI_FSL_REG_PHY2_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY3,
		    AHCI_FSL_REG_PHY3_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PTC,
		    AHCI_FSL_REG_PTC_CFG);
		if (ctrl->ctlr.dma_coherent)
			ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_AXICC,
			    AHCI_FSL_REG_AXICC_CFG);
	}
}

/*
 * Newbus probe: match against the FDT compat table.
 * Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
ahci_fsl_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, ahci_fsl_fdt_compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "NXP QorIQ Layerscape AHCI controller");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Newbus attach: record the SoC type, enable the clock, map registers,
 * set up ECC and PHY, then hand off to the generic AHCI attach.
 */
static int
ahci_fsl_fdt_attach(device_t dev)
{
	struct ahci_fsl_fdt_controller *ctlr;
	struct ahci_controller *ahci;
	phandle_t node;
	clk_t clock;
	int ret;

	node = ofw_bus_get_node(dev);
	ctlr = device_get_softc(dev);
	/* ocd_data is the QORIQ_AHCI_* SoC discriminator from the table. */
	ctlr->soc_type =
	    ofw_bus_search_compatible(dev, ahci_fsl_fdt_compat_data)->ocd_data;
	ahci = &ctlr->ctlr;
	ahci->dev = dev;
	ahci->r_rid = 0;
	ahci->quirks = AHCI_Q_NOPMP;	/* No port multiplier support. */

	ahci->dma_coherent = OF_hasprop(node, "dma-coherent");

	ret = clk_get_by_ofw_index(dev, node, 0, &clock);
	if (ret != 0) {
		device_printf(dev, "No clock found.\n");
		return (ENXIO);
	}

	ret =
	    clk_enable(clock);
	if (ret != 0) {
		device_printf(dev, "Could not enable clock.\n");
		return (ENXIO);
	}

	/*
	 * If the FDT names the register regions, locate the "ahci" one;
	 * otherwise fall back to the default rid 0 set above.
	 */
	if (OF_hasprop(node, "reg-names") && ofw_bus_find_string_index(node,
	    "reg-names", "ahci", &ahci->r_rid)) {
		/* NOTE(review): message lacks a trailing "\n". */
		device_printf(dev, "Could not locate 'ahci' string in the "
		    "'reg-names' property");
		return (ENOENT);
	}

	ahci->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ahci->r_rid, RF_ACTIVE);
	if (!ahci->r_mem) {
		device_printf(dev,
		    "Could not allocate resources for controller\n");
		return (ENOMEM);
	}

	/*
	 * The "sata-ecc" region is optional (ENOENT is tolerated); it is
	 * shareable because several SATA instances reference the same ECC
	 * block.
	 */
	ret = ofw_bus_find_string_index(node, "reg-names", "sata-ecc",
	    &ctlr->r_ecc_rid);
	if (ret == 0) {
		ctlr->r_ecc = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &ctlr->r_ecc_rid, RF_ACTIVE | RF_SHAREABLE);
		if (!ctlr->r_ecc) {
			device_printf(dev,
			    "Could not allocate resources for controller\n");
			ret = ENOMEM;
			goto err_free_mem;
		}
	} else if (ret != ENOENT) {
		/* NOTE(review): message lacks a trailing "\n". */
		device_printf(dev, "Could not locate 'sata-ecc' string in "
		    "the 'reg-names' property");
		goto err_free_mem;
	}

	ret = ahci_fsl_fdt_ecc_init(ctlr);
	if (ret != 0) {
		device_printf(dev, "Could not initialize 'ecc' registers");
		goto err_free_mem;
	}

	/* Setup controller defaults. */
	ahci->numirqs = 1;

	ahci_fsl_fdt_phy_init(ctlr);

	/* Reset controller.
*/ ret = ahci_ctlr_reset(dev); if (ret) goto err_free_mem; ret = ahci_attach(dev); if (ret) { device_printf(dev, "Could not initialize AHCI, with error: %d\n", ret); goto err_free_ecc; } return (0); err_free_mem: bus_free_resource(dev, SYS_RES_MEMORY, ahci->r_mem); err_free_ecc: if (ctlr->r_ecc) bus_free_resource(dev, SYS_RES_MEMORY, ctlr->r_ecc); return (ret); } static int ahci_fsl_fdt_detach(device_t dev) { struct ahci_fsl_fdt_controller *ctlr; ctlr = device_get_softc(dev); if (ctlr->r_ecc) bus_free_resource(dev, SYS_RES_MEMORY, ctlr->r_ecc); return ahci_detach(dev); } static const device_method_t ahci_fsl_fdt_methods[] = { DEVMETHOD(device_probe, ahci_fsl_fdt_probe), DEVMETHOD(device_attach, ahci_fsl_fdt_attach), DEVMETHOD(device_detach, ahci_fsl_fdt_detach), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr, ahci_teardown_intr), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static driver_t ahci_fsl_fdt_driver = { "ahci", ahci_fsl_fdt_methods, sizeof(struct ahci_fsl_fdt_controller), }; DRIVER_MODULE(ahci_fsl, simplebus, ahci_fsl_fdt_driver, NULL, NULL); DRIVER_MODULE(ahci_fsl, ofwbus, ahci_fsl_fdt_driver, NULL, NULL); diff --git a/sys/dev/cadence/if_cgem.c b/sys/dev/cadence/if_cgem.c index d89e91ad1c5b..9d2b1d71883e 100644 --- a/sys/dev/cadence/if_cgem.c +++ b/sys/dev/cadence/if_cgem.c @@ -1,2016 +1,2016 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012-2014 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * A network interface driver for Cadence GEM Gigabit Ethernet * interface such as the one used in Xilinx Zynq-7000 SoC. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16 * and register definitions are in appendix B.18. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #include -#include +#include #if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT #define CGEM64 #endif #include #include "miibus_if.h" #define IF_CGEM_NAME "cgem" #define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */ #define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */ /* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. 
*/ #define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. */ #define TX_MAX_DMA_SEGS 8 /* maximum segs in a tx mbuf dma */ #define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ CSUM_TCP_IPV6 | CSUM_UDP_IPV6) #define HWQUIRK_NONE 0 #define HWQUIRK_NEEDNULLQS 1 #define HWQUIRK_RXHANGWAR 2 static struct ofw_compat_data compat_data[] = { { "cdns,zynq-gem", HWQUIRK_RXHANGWAR }, /* Deprecated */ { "cdns,zynqmp-gem", HWQUIRK_NEEDNULLQS }, /* Deprecated */ { "xlnx,zynq-gem", HWQUIRK_RXHANGWAR }, { "xlnx,zynqmp-gem", HWQUIRK_NEEDNULLQS }, { "microchip,mpfs-mss-gem", HWQUIRK_NEEDNULLQS }, { "sifive,fu540-c000-gem", HWQUIRK_NONE }, { "sifive,fu740-c000-gem", HWQUIRK_NONE }, { NULL, 0 } }; struct cgem_softc { if_t ifp; struct mtx sc_mtx; device_t dev; device_t miibus; u_int mii_media_active; /* last active media */ int if_old_flags; struct resource *mem_res; struct resource *irq_res; void *intrhand; struct callout tick_ch; uint32_t net_ctl_shadow; uint32_t net_cfg_shadow; clk_t clk_pclk; clk_t clk_hclk; clk_t clk_txclk; clk_t clk_rxclk; clk_t clk_tsuclk; int neednullqs; int phy_contype; bus_dma_tag_t desc_dma_tag; bus_dma_tag_t mbuf_dma_tag; /* receive descriptor ring */ struct cgem_rx_desc *rxring; bus_addr_t rxring_physaddr; struct mbuf *rxring_m[CGEM_NUM_RX_DESCS]; bus_dmamap_t rxring_m_dmamap[CGEM_NUM_RX_DESCS]; int rxring_hd_ptr; /* where to put rcv bufs */ int rxring_tl_ptr; /* where to get receives */ int rxring_queued; /* how many rcv bufs queued */ bus_dmamap_t rxring_dma_map; int rxbufs; /* tunable number rcv bufs */ int rxhangwar; /* rx hang work-around */ u_int rxoverruns; /* rx overruns */ u_int rxnobufs; /* rx buf ring empty events */ u_int rxdmamapfails; /* rx dmamap failures */ uint32_t rx_frames_prev; /* transmit descriptor ring */ struct cgem_tx_desc *txring; bus_addr_t txring_physaddr; struct mbuf *txring_m[CGEM_NUM_TX_DESCS]; bus_dmamap_t txring_m_dmamap[CGEM_NUM_TX_DESCS]; int txring_hd_ptr; /* where to put next xmits */ int 
txring_tl_ptr; /* next xmit mbuf to free */ int txring_queued; /* num xmits segs queued */ u_int txfull; /* tx ring full events */ u_int txdefrags; /* tx calls to m_defrag() */ u_int txdefragfails; /* tx m_defrag() failures */ u_int txdmamapfails; /* tx dmamap failures */ /* null descriptor rings */ void *null_qs; bus_addr_t null_qs_physaddr; /* hardware provided statistics */ struct cgem_hw_stats { uint64_t tx_bytes; uint32_t tx_frames; uint32_t tx_frames_bcast; uint32_t tx_frames_multi; uint32_t tx_frames_pause; uint32_t tx_frames_64b; uint32_t tx_frames_65to127b; uint32_t tx_frames_128to255b; uint32_t tx_frames_256to511b; uint32_t tx_frames_512to1023b; uint32_t tx_frames_1024to1536b; uint32_t tx_under_runs; uint32_t tx_single_collisn; uint32_t tx_multi_collisn; uint32_t tx_excsv_collisn; uint32_t tx_late_collisn; uint32_t tx_deferred_frames; uint32_t tx_carrier_sense_errs; uint64_t rx_bytes; uint32_t rx_frames; uint32_t rx_frames_bcast; uint32_t rx_frames_multi; uint32_t rx_frames_pause; uint32_t rx_frames_64b; uint32_t rx_frames_65to127b; uint32_t rx_frames_128to255b; uint32_t rx_frames_256to511b; uint32_t rx_frames_512to1023b; uint32_t rx_frames_1024to1536b; uint32_t rx_frames_undersize; uint32_t rx_frames_oversize; uint32_t rx_frames_jabber; uint32_t rx_frames_fcs_errs; uint32_t rx_frames_length_errs; uint32_t rx_symbol_errs; uint32_t rx_align_errs; uint32_t rx_resource_errs; uint32_t rx_overrun_errs; uint32_t rx_ip_hdr_csum_errs; uint32_t rx_tcp_csum_errs; uint32_t rx_udp_csum_errs; } stats; }; #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) #define BARRIER(sc, off, len, flags) \ (bus_barrier((sc)->mem_res, (off), (len), (flags)) #define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define CGEM_LOCK_INIT(sc) mtx_init(&(sc)->sc_mtx, \ device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF) #define CGEM_LOCK_DESTROY(sc) 
mtx_destroy(&(sc)->sc_mtx) #define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) /* Allow platforms to optionally provide a way to set the reference clock. */ int cgem_set_ref_clk(int unit, int frequency); static int cgem_probe(device_t dev); static int cgem_attach(device_t dev); static int cgem_detach(device_t dev); static void cgem_tick(void *); static void cgem_intr(void *); static void cgem_mediachange(struct cgem_softc *, struct mii_data *); static void cgem_get_mac(struct cgem_softc *sc, u_char eaddr[]) { int i; uint32_t rnd; /* See if boot loader gave us a MAC address already. */ for (i = 0; i < 4; i++) { uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i)); uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff; if (low != 0 || high != 0) { eaddr[0] = low & 0xff; eaddr[1] = (low >> 8) & 0xff; eaddr[2] = (low >> 16) & 0xff; eaddr[3] = (low >> 24) & 0xff; eaddr[4] = high & 0xff; eaddr[5] = (high >> 8) & 0xff; break; } } /* No MAC from boot loader? Assign a random one. */ if (i == 4) { rnd = arc4random(); eaddr[0] = 'b'; eaddr[1] = 's'; eaddr[2] = 'd'; eaddr[3] = (rnd >> 16) & 0xff; eaddr[4] = (rnd >> 8) & 0xff; eaddr[5] = rnd & 0xff; device_printf(sc->dev, "no mac address found, assigning " "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]); } /* Move address to first slot and zero out the rest. */ WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]); for (i = 1; i < 4; i++) { WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); } } /* * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash * corresponds to a bit in a 64-bit hash register. Setting that bit in the * hash register enables reception of all frames with a destination address * that hashes to that 6-bit value. * * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech * Reference Manual. 
 * Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	/* hash bit i = XOR of address bits i, i+6, i+12, ... i+42. */
	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

/*
 * if_foreach_llmaddr() callback: fold one link-level multicast address
 * into the 64-bit hash filter.  arg points at two uint32_t words:
 * hashes[0] collects bits 32-63 (written to CGEM_HASH_TOP by the caller)
 * and hashes[1] bits 0-31 (CGEM_HASH_BOT).  Always returns 1 so every
 * address is counted.
 */
static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);
	return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	/* Start from a clean slate for the three filter-related bits. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			/* All-ones hash accepts every multicast frame. */
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);
		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	/* Only a single contiguous segment is accepted; otherwise the
	 * output address is left untouched. */
	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for priority queues we actually can't disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues.
*/ queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) & CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1; if (queue_mask == 0) return; /* Create empty RX queue and empty TX buf queues. */ memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) + sizeof(struct cgem_tx_desc)); rx_desc = sc->null_qs; rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP; tx_desc = (struct cgem_tx_desc *)(rx_desc + 1); tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP; /* Point all valid ring base pointers to the null queues. */ for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) { WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr); WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr + sizeof(struct cgem_rx_desc)); } } /* Create DMA'able descriptor rings. */ static int cgem_setup_descs(struct cgem_softc *sc) { int i, err; int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) + CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc); if (sc->neednullqs) desc_rings_size += sizeof(struct cgem_rx_desc) + sizeof(struct cgem_tx_desc); sc->txring = NULL; sc->rxring = NULL; /* Allocate non-cached DMA space for RX and TX descriptors. */ err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, #ifdef CGEM64 1ULL << 32, /* Do not cross a 4G boundary. */ #else 0, #endif BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, desc_rings_size, 1, desc_rings_size, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag); if (err) return (err); /* Set up a bus_dma_tag for mbufs. */ err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mbuf_dma_tag); if (err) return (err); /* * Allocate DMA memory. We allocate transmit, receive and null * descriptor queues all at once because the hardware only provides * one register for the upper 32 bits of rx and tx descriptor queues * hardware addresses. 
*/ err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rxring_dma_map); if (err) return (err); /* Load descriptor DMA memory. */ err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map, (void *)sc->rxring, desc_rings_size, cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT); if (err) return (err); /* Initialize RX descriptors. */ for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { sc->rxring[i].addr = CGEM_RXDESC_OWN; sc->rxring[i].ctl = 0; sc->rxring_m[i] = NULL; sc->rxring_m_dmamap[i] = NULL; } sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; sc->rxring_hd_ptr = 0; sc->rxring_tl_ptr = 0; sc->rxring_queued = 0; sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS); sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc); /* Initialize TX descriptor ring. */ for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { sc->txring[i].addr = 0; sc->txring[i].ctl = CGEM_TXDESC_USED; sc->txring_m[i] = NULL; sc->txring_m_dmamap[i] = NULL; } sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; sc->txring_hd_ptr = 0; sc->txring_tl_ptr = 0; sc->txring_queued = 0; if (sc->neednullqs) { sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS); sc->null_qs_physaddr = sc->txring_physaddr + CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc); cgem_null_qs(sc); } return (0); } /* Fill receive descriptor ring with mbufs. */ static void cgem_fill_rqueue(struct cgem_softc *sc) { struct mbuf *m = NULL; bus_dma_segment_t segs[TX_MAX_DMA_SEGS]; int nsegs; CGEM_ASSERT_LOCKED(sc); while (sc->rxring_queued < sc->rxbufs) { /* Get a cluster mbuf. */ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) break; m->m_len = MCLBYTES; m->m_pkthdr.len = MCLBYTES; m->m_pkthdr.rcvif = sc->ifp; /* Load map and plug in physical address. 
*/ if (bus_dmamap_create(sc->mbuf_dma_tag, 0, &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) { sc->rxdmamapfails++; m_free(m); break; } if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_hd_ptr], m, segs, &nsegs, BUS_DMA_NOWAIT)) { sc->rxdmamapfails++; bus_dmamap_destroy(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_hd_ptr]); sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL; m_free(m); break; } sc->rxring_m[sc->rxring_hd_ptr] = m; /* Sync cache with receive buffer. */ bus_dmamap_sync(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_hd_ptr], BUS_DMASYNC_PREREAD); /* Write rx descriptor and increment head pointer. */ sc->rxring[sc->rxring_hd_ptr].ctl = 0; #ifdef CGEM64 sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32; #endif if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) { sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr | CGEM_RXDESC_WRAP; sc->rxring_hd_ptr = 0; } else sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr; sc->rxring_queued++; } } /* Pull received packets off of receive descriptor ring. */ static void cgem_recv(struct cgem_softc *sc) { if_t ifp = sc->ifp; struct mbuf *m, *m_hd, **m_tl; uint32_t ctl; CGEM_ASSERT_LOCKED(sc); /* Pick up all packets in which the OWN bit is set. */ m_hd = NULL; m_tl = &m_hd; while (sc->rxring_queued > 0 && (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) { ctl = sc->rxring[sc->rxring_tl_ptr].ctl; /* Grab filled mbuf. */ m = sc->rxring_m[sc->rxring_tl_ptr]; sc->rxring_m[sc->rxring_tl_ptr] = NULL; /* Sync cache with receive buffer. */ bus_dmamap_sync(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_tl_ptr], BUS_DMASYNC_POSTREAD); /* Unload and destroy dmamap. */ bus_dmamap_unload(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_tl_ptr]); bus_dmamap_destroy(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_tl_ptr]); sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL; /* Increment tail pointer. 
*/ if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS) sc->rxring_tl_ptr = 0; sc->rxring_queued--; /* * Check FCS and make sure entire packet landed in one mbuf * cluster (which is much bigger than the largest ethernet * packet). */ if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 || (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) != (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) { /* discard. */ m_free(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } /* Ready it to hand off to upper layers. */ m->m_data += ETHER_ALIGN; m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len; /* * Are we using hardware checksumming? Check the status in the * receive descriptor. */ if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { /* TCP or UDP checks out, IP checks out too. */ if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == CGEM_RXDESC_CKSUM_STAT_TCP_GOOD || (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == CGEM_RXDESC_CKSUM_STAT_IP_GOOD) { /* Only IP checks out. */ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; m->m_pkthdr.csum_data = 0xffff; } } /* Queue it up for delivery below. */ *m_tl = m; m_tl = &m->m_next; } /* Replenish receive buffers. */ cgem_fill_rqueue(sc); /* Unlock and send up packets. */ CGEM_UNLOCK(sc); while (m_hd != NULL) { m = m_hd; m_hd = m_hd->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if_input(ifp, m); } CGEM_LOCK(sc); } /* Find completed transmits and free their mbufs. */ static void cgem_clean_tx(struct cgem_softc *sc) { struct mbuf *m; uint32_t ctl; CGEM_ASSERT_LOCKED(sc); /* free up finished transmits. */ while (sc->txring_queued > 0 && ((ctl = sc->txring[sc->txring_tl_ptr].ctl) & CGEM_TXDESC_USED) != 0) { /* Sync cache. 
	 */
	bus_dmamap_sync(sc->mbuf_dma_tag,
	    sc->txring_m_dmamap[sc->txring_tl_ptr],
	    BUS_DMASYNC_POSTWRITE);

	/* Unload and destroy DMA map. */
	bus_dmamap_unload(sc->mbuf_dma_tag,
	    sc->txring_m_dmamap[sc->txring_tl_ptr]);
	bus_dmamap_destroy(sc->mbuf_dma_tag,
	    sc->txring_m_dmamap[sc->txring_tl_ptr]);
	sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

	/* Free up the mbuf. */
	m = sc->txring_m[sc->txring_tl_ptr];
	sc->txring_m[sc->txring_tl_ptr] = NULL;
	m_freem(m);

	/* Check the status. */
	if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
		/* Serious bus error. log to console. */
#ifdef CGEM64
		device_printf(sc->dev,
		    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
		    sc->txring[sc->txring_tl_ptr].addrhi,
		    sc->txring[sc->txring_tl_ptr].addr);
#else
		device_printf(sc->dev,
		    "cgem_clean_tx: AHB error, addr=0x%x\n",
		    sc->txring[sc->txring_tl_ptr].addr);
#endif
	} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
	    CGEM_TXDESC_LATE_COLL)) != 0) {
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
	} else
		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

	/*
	 * If the packet spanned more than one tx descriptor, skip
	 * descriptors until we find the end so that only
	 * start-of-frame descriptors are processed.
	 */
	while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		ctl = sc->txring[sc->txring_tl_ptr].ctl;

		sc->txring[sc->txring_tl_ptr].ctl =
		    ctl | CGEM_TXDESC_USED;
	}

	/* Next descriptor. */
	if ((ctl & CGEM_TXDESC_WRAP) != 0)
		sc->txring_tl_ptr = 0;
	else
		sc->txring_tl_ptr++;
	sc->txring_queued--;

	/* Ring space was just freed, so clear OACTIVE. */
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/*
 * Start transmits: dequeue packets from the interface send queue, map
 * them for DMA, and hand them to the hardware TX descriptor ring.
 * Called with the softc lock held.
 */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments! defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr],
			    m, segs, &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			/* Only the start-of-frame slot keeps the mbuf. */
			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

/* if_start handler: take the lock and run the locked transmit path. */
static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

/*
 * Accumulate the hardware statistics counters into the softc's stats
 * structure.  The hardware counters clear on read, so each read is
 * added to the running totals.  Called with the softc lock held.
 */
static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp,
	    IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

/*
 * Once-per-second callout: poll the PHY, accumulate hardware statistics,
 * and apply the receiver-hang work-around when it is enabled.
 */
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang: no frames received in the last second. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs: flush and refill the receive queue. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register.
	 */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	/* Disable everything, then clear statistics and status registers. */
	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register, preserving clock divider and
	 * bus width chosen by cgem_reset(). */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Check connection type, enable SGMII bits if necessary. */
	if (sc->phy_contype == MII_CONTYPE_SGMII) {
		sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
		sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
	}

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE |
	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_TX_USED_READ |
	    CGEM_INTR_RX_USED_READ | CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* if_init handler: take the lock and run the locked init path. */
static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues.
 */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	memset(sc->txring, 0,
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	memset(sc->rxring, 0,
	    CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;
			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

/* ioctl handler: interface flags, multicast filters, media, offloads. */
static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (ENXIO);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);
				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);
				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming.
				 */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		/* VLAN hw csum only when both TX and RX csum are enabled. */
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);
		CGEM_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/* MII bus support routines. */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);
	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

/* Read a PHY register via the management port; returns -1 on timeout. */
static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

/* Write a PHY register via the management port; returns -1 on timeout. */
static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	/* Only reprogram on an active, valid link that actually changed. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	/* Same condition as statchg: active valid link that changed. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);

/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media.
*/ sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_FULL_DUPLEX); switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN); ref_clk_freq = 125000000; break; case IFM_100_TX: sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100; ref_clk_freq = 25000000; break; default: ref_clk_freq = 2500000; } if ((mii->mii_media_active & IFM_FDX) != 0) sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX; WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); if (sc->clk_pclk != NULL) { CGEM_UNLOCK(sc); if (clk_set_freq(sc->clk_pclk, ref_clk_freq, 0)) device_printf(sc->dev, "could not set ref clk to %d\n", ref_clk_freq); CGEM_LOCK(sc); } sc->mii_media_active = mii->mii_media_active; } static void cgem_add_sysctls(device_t dev) { struct cgem_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW, &sc->rxbufs, 0, "Number receive buffers to provide"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW, &sc->rxhangwar, 0, "Enable receive hang work-around"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD, &sc->rxoverruns, 0, "Receive overrun events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD, &sc->rxnobufs, 0, "Receive buf queue empty events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD, &sc->rxdmamapfails, 0, "Receive DMA map failures"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, &sc->txfull, 0, "Transmit ring full events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, &sc->txdmamapfails, 0, "Transmit DMA map failures"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, &sc->txdefrags, 0, "Transmit m_defrag() calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"_txdefragfails", CTLFLAG_RD, &sc->txdefragfails, 0, "Transmit m_defrag() failures"); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics"); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, &sc->stats.tx_bytes, "Total bytes transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, &sc->stats.tx_frames, 0, "Total frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, &sc->stats.tx_frames_bcast, 0, "Number broadcast frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, &sc->stats.tx_frames_multi, 0, "Number multicast frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, "Number pause frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, &sc->stats.tx_frames_64b, 0, "Number frames transmitted of size 64 bytes or less"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD, &sc->stats.tx_frames_65to127b, 0, "Number frames transmitted of size 65-127 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, "Number frames transmitted of size 128-255 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, "Number frames transmitted of size 256-511 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, "Number frames transmitted of size 512-1023 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, "Number frames transmitted of size 1024-1536 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", CTLFLAG_RD, &sc->stats.tx_under_runs, 0, "Number transmit under-run events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"tx_single_collisn", CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, "Number single-collision transmit frames"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, "Number multi-collision transmit frames"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, "Number excessive collision transmit frames"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, "Number late-collision transmit frames"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, "Number deferred transmit frames"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs", CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, "Number carrier sense errors on transmit"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, &sc->stats.rx_bytes, "Total bytes received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, &sc->stats.rx_frames, 0, "Total frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, "Number broadcast frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, "Number multicast frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, "Number pause frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, "Number frames received of size 64 bytes or less"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, "Number frames received of size 65-127 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, "Number frames received of size 128-255 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", 
CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, "Number frames received of size 256-511 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, "Number frames received of size 512-1023 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, "Number frames received of size 1024-1536 bytes"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize", CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, "Number undersize frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, "Number oversize frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, "Number jabber frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, "Number frames received with FCS errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, "Number frames received with length errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, "Number receive symbol errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", CTLFLAG_RD, &sc->stats.rx_align_errs, 0, "Number receive alignment errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, "Number frames received when no rx buffer available"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, "Number frames received but not copied due to receive overrun"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, "Number frames received with IP header checksum errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", CTLFLAG_RD, 
&sc->stats.rx_tcp_csum_errs, 0, "Number frames received with TCP checksum errors"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, "Number frames received with UDP checksum errors"); } static int cgem_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL) return (ENXIO); device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); return (0); } static int cgem_attach(device_t dev) { struct cgem_softc *sc = device_get_softc(dev); if_t ifp = NULL; int rid, err; u_char eaddr[ETHER_ADDR_LEN]; int hwquirks; phandle_t node; sc->dev = dev; CGEM_LOCK_INIT(sc); /* Key off of compatible string and set hardware-specific options. */ hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0) sc->neednullqs = 1; if ((hwquirks & HWQUIRK_RXHANGWAR) != 0) sc->rxhangwar = 1; /* * Both pclk and hclk are mandatory but we don't have a proper * clock driver for Zynq so don't make it fatal if we can't * get them. 
	 */
	if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->clk_pclk) != 0)
		device_printf(dev, "could not retrieve pclk.\n");
	else {
		if (clk_enable(sc->clk_pclk) != 0)
			device_printf(dev, "could not enable pclk.\n");
	}
	if (clk_get_by_ofw_name(dev, 0, "hclk", &sc->clk_hclk) != 0)
		device_printf(dev, "could not retrieve hclk.\n");
	else {
		if (clk_enable(sc->clk_hclk) != 0)
			device_printf(dev, "could not enable hclk.\n");
	}

	/* Optional clocks */
	if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->clk_txclk) == 0) {
		if (clk_enable(sc->clk_txclk) != 0) {
			device_printf(dev, "could not enable tx_clk.\n");
			err = ENXIO;
			goto err_pclk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "rx_clk", &sc->clk_rxclk) == 0) {
		if (clk_enable(sc->clk_rxclk) != 0) {
			device_printf(dev, "could not enable rx_clk.\n");
			err = ENXIO;
			goto err_tx_clk;
		}
	}
	if (clk_get_by_ofw_name(dev, 0, "tsu_clk", &sc->clk_tsuclk) == 0) {
		if (clk_enable(sc->clk_tsuclk) != 0) {
			device_printf(dev, "could not enable tsu_clk.\n");
			err = ENXIO;
			goto err_rx_clk;
		}
	}

	/* Determine PHY connection type from the FDT node. */
	node = ofw_bus_get_node(dev);
	sc->phy_contype = mii_fdt_get_contype(node);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto err_tsu_clk;
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		/*
		 * NOTE(review): unlike the earlier failure paths, this path
		 * returns via cgem_detach() rather than the err_* labels --
		 * confirm cgem_detach() releases the clocks in this state.
		 */
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err)
		device_printf(dev, "warning: attaching PHYs failed\n");

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		goto err;
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks.
*/ callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); ether_ifattach(ifp, eaddr); err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE | INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand); if (err) { device_printf(dev, "could not set interrupt handler.\n"); ether_ifdetach(ifp); cgem_detach(dev); goto err; } cgem_add_sysctls(dev); return (0); err_tsu_clk: if (sc->clk_tsuclk) clk_release(sc->clk_tsuclk); err_rx_clk: if (sc->clk_rxclk) clk_release(sc->clk_rxclk); err_tx_clk: if (sc->clk_txclk) clk_release(sc->clk_txclk); err_pclk: if (sc->clk_pclk) clk_release(sc->clk_pclk); if (sc->clk_hclk) clk_release(sc->clk_hclk); err: return (err); } static int cgem_detach(device_t dev) { struct cgem_softc *sc = device_get_softc(dev); int i; if (sc == NULL) return (ENODEV); if (device_is_attached(dev)) { CGEM_LOCK(sc); cgem_stop(sc); CGEM_UNLOCK(sc); callout_drain(&sc->tick_ch); if_setflagbits(sc->ifp, 0, IFF_UP); ether_ifdetach(sc->ifp); } if (sc->miibus != NULL) { device_delete_child(dev, sc->miibus); sc->miibus = NULL; } /* Release resources. */ if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; } if (sc->irq_res != NULL) { if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sc->irq_res = NULL; } /* Release DMA resources. 
*/ if (sc->rxring != NULL) { if (sc->rxring_physaddr != 0) { bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map); sc->rxring_physaddr = 0; sc->txring_physaddr = 0; sc->null_qs_physaddr = 0; } bus_dmamem_free(sc->desc_dma_tag, sc->rxring, sc->rxring_dma_map); sc->rxring = NULL; sc->txring = NULL; sc->null_qs = NULL; for (i = 0; i < CGEM_NUM_RX_DESCS; i++) if (sc->rxring_m_dmamap[i] != NULL) { bus_dmamap_destroy(sc->mbuf_dma_tag, sc->rxring_m_dmamap[i]); sc->rxring_m_dmamap[i] = NULL; } for (i = 0; i < CGEM_NUM_TX_DESCS; i++) if (sc->txring_m_dmamap[i] != NULL) { bus_dmamap_destroy(sc->mbuf_dma_tag, sc->txring_m_dmamap[i]); sc->txring_m_dmamap[i] = NULL; } } if (sc->desc_dma_tag != NULL) { bus_dma_tag_destroy(sc->desc_dma_tag); sc->desc_dma_tag = NULL; } if (sc->mbuf_dma_tag != NULL) { bus_dma_tag_destroy(sc->mbuf_dma_tag); sc->mbuf_dma_tag = NULL; } bus_generic_detach(dev); if (sc->clk_tsuclk) clk_release(sc->clk_tsuclk); if (sc->clk_rxclk) clk_release(sc->clk_rxclk); if (sc->clk_txclk) clk_release(sc->clk_txclk); if (sc->clk_pclk) clk_release(sc->clk_pclk); if (sc->clk_hclk) clk_release(sc->clk_hclk); CGEM_LOCK_DESTROY(sc); return (0); } static device_method_t cgem_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cgem_probe), DEVMETHOD(device_attach, cgem_attach), DEVMETHOD(device_detach, cgem_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cgem_miibus_readreg), DEVMETHOD(miibus_writereg, cgem_miibus_writereg), DEVMETHOD(miibus_statchg, cgem_miibus_statchg), DEVMETHOD(miibus_linkchg, cgem_miibus_linkchg), DEVMETHOD_END }; static driver_t cgem_driver = { "cgem", cgem_methods, sizeof(struct cgem_softc), }; DRIVER_MODULE(cgem, simplebus, cgem_driver, NULL, NULL); DRIVER_MODULE(miibus, cgem, miibus_driver, NULL, NULL); MODULE_DEPEND(cgem, miibus, 1, 1, 1); MODULE_DEPEND(cgem, ether, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/clk/allwinner/aw_ccu.c b/sys/dev/clk/allwinner/aw_ccu.c index fc963f7a6485..9ee08f8f2dd1 100644 --- 
a/sys/dev/clk/allwinner/aw_ccu.c +++ b/sys/dev/clk/allwinner/aw_ccu.c @@ -1,247 +1,247 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Allwinner oscillator clock */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include "clkdev_if.h" #define CCU_BASE 0x01c20000 #define CCU_SIZE 0x400 struct aw_ccu_softc { struct simplebus_softc sc; bus_space_tag_t bst; bus_space_handle_t bsh; struct mtx mtx; int flags; }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun7i-a20", 1 }, { "allwinner,sun6i-a31", 1 }, { "allwinner,sun6i-a31s", 1 }, { NULL, 0 } }; static int aw_ccu_check_addr(struct aw_ccu_softc *sc, bus_addr_t addr, bus_space_handle_t *pbsh, bus_size_t *poff) { if (addr >= CCU_BASE && addr < (CCU_BASE + CCU_SIZE)) { *poff = addr - CCU_BASE; *pbsh = sc->bsh; return (0); } return (EINVAL); } static int aw_ccu_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct aw_ccu_softc *sc; bus_space_handle_t bsh; bus_size_t reg; sc = device_get_softc(dev); if (aw_ccu_check_addr(sc, addr, &bsh, ®) != 0) return (EINVAL); mtx_assert(&sc->mtx, MA_OWNED); bus_space_write_4(sc->bst, bsh, reg, val); return (0); } static int aw_ccu_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct aw_ccu_softc *sc; bus_space_handle_t bsh; bus_size_t reg; sc = device_get_softc(dev); if (aw_ccu_check_addr(sc, addr, &bsh, ®) != 0) return (EINVAL); mtx_assert(&sc->mtx, MA_OWNED); *val = bus_space_read_4(sc->bst, bsh, reg); return (0); } static int aw_ccu_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct aw_ccu_softc *sc; bus_space_handle_t bsh; bus_size_t reg; uint32_t val; sc = device_get_softc(dev); if (aw_ccu_check_addr(sc, addr, &bsh, ®) != 0) return (EINVAL); mtx_assert(&sc->mtx, MA_OWNED); val = bus_space_read_4(sc->bst, bsh, reg); val &= ~clr; val |= set; bus_space_write_4(sc->bst, bsh, reg, val); return (0); } static void aw_ccu_device_lock(device_t dev) { struct aw_ccu_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void aw_ccu_device_unlock(device_t dev) { struct 
aw_ccu_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static const struct ofw_compat_data * aw_ccu_search_compatible(void) { const struct ofw_compat_data *compat; phandle_t root; root = OF_finddevice("/"); for (compat = compat_data; compat->ocd_str != NULL; compat++) if (ofw_bus_node_is_compatible(root, compat->ocd_str)) break; return (compat); } static int aw_ccu_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL || strcmp(name, "clocks") != 0) return (ENXIO); if (aw_ccu_search_compatible()->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Clock Control Unit"); return (BUS_PROBE_SPECIFIC); } static int aw_ccu_attach(device_t dev) { struct aw_ccu_softc *sc; phandle_t node, child; device_t cdev; int error; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); simplebus_init(dev, node); sc->flags = aw_ccu_search_compatible()->ocd_data; /* * Map registers. The DT doesn't have a "reg" property * for the /clocks node and child nodes have conflicting "reg" * properties. 
*/ sc->bst = bus_get_bus_tag(dev); error = bus_space_map(sc->bst, CCU_BASE, CCU_SIZE, 0, &sc->bsh); if (error != 0) { device_printf(dev, "couldn't map CCU: %d\n", error); return (error); } mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Attach child devices */ for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static device_method_t aw_ccu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_ccu_probe), DEVMETHOD(device_attach, aw_ccu_attach), /* clkdev interface */ DEVMETHOD(clkdev_write_4, aw_ccu_write_4), DEVMETHOD(clkdev_read_4, aw_ccu_read_4), DEVMETHOD(clkdev_modify_4, aw_ccu_modify_4), DEVMETHOD(clkdev_device_lock, aw_ccu_device_lock), DEVMETHOD(clkdev_device_unlock, aw_ccu_device_unlock), DEVMETHOD_END }; DEFINE_CLASS_1(aw_ccu, aw_ccu_driver, aw_ccu_methods, sizeof(struct aw_ccu_softc), simplebus_driver); EARLY_DRIVER_MODULE(aw_ccu, simplebus, aw_ccu_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(aw_ccu, 1); diff --git a/sys/dev/clk/allwinner/aw_ccung.c b/sys/dev/clk/allwinner/aw_ccung.c index 2a6cbd8b9a20..f446b77b6226 100644 --- a/sys/dev/clk/allwinner/aw_ccung.c +++ b/sys/dev/clk/allwinner/aw_ccung.c @@ -1,359 +1,359 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner Clock Control Unit */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include #ifdef __aarch64__ #include "opt_soc.h" #endif #include "clkdev_if.h" #include "hwreset_if.h" #if 0 #define dprintf(format, arg...) device_printf(dev, "%s: " format, __func__, arg) #else #define dprintf(format, arg...) 
#endif static struct resource_spec aw_ccung_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; #define CCU_READ4(sc, reg) bus_read_4((sc)->res, (reg)) #define CCU_WRITE4(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int aw_ccung_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); dprintf("offset=%lx write %x\n", addr, val); CCU_WRITE4(sc, addr, val); return (0); } static int aw_ccung_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); *val = CCU_READ4(sc, addr); dprintf("offset=%lx Read %x\n", addr, *val); return (0); } static int aw_ccung_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct aw_ccung_softc *sc; uint32_t reg; sc = device_get_softc(dev); dprintf("offset=%lx clr: %x set: %x\n", addr, clr, set); reg = CCU_READ4(sc, addr); reg &= ~clr; reg |= set; CCU_WRITE4(sc, addr, reg); return (0); } static int aw_ccung_reset_assert(device_t dev, intptr_t id, bool reset) { struct aw_ccung_softc *sc; uint32_t val; sc = device_get_softc(dev); dprintf("%sassert reset id %ld\n", reset ? "" : "De", id); if (id >= sc->nresets || sc->resets[id].offset == 0) return (0); mtx_lock(&sc->mtx); val = CCU_READ4(sc, sc->resets[id].offset); dprintf("offset=%x Read %x\n", sc->resets[id].offset, val); if (reset) val &= ~(1 << sc->resets[id].shift); else val |= 1 << sc->resets[id].shift; dprintf("offset=%x Write %x\n", sc->resets[id].offset, val); CCU_WRITE4(sc, sc->resets[id].offset, val); mtx_unlock(&sc->mtx); return (0); } static int aw_ccung_reset_is_asserted(device_t dev, intptr_t id, bool *reset) { struct aw_ccung_softc *sc; uint32_t val; sc = device_get_softc(dev); if (id >= sc->nresets || sc->resets[id].offset == 0) return (0); mtx_lock(&sc->mtx); val = CCU_READ4(sc, sc->resets[id].offset); dprintf("offset=%x Read %x\n", sc->resets[id].offset, val); *reset = (val & (1 << sc->resets[id].shift)) != 0 ? 
false : true; mtx_unlock(&sc->mtx); return (0); } static void aw_ccung_device_lock(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void aw_ccung_device_unlock(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static int aw_ccung_register_gates(struct aw_ccung_softc *sc) { struct clk_gate_def def; int i; for (i = 0; i < sc->ngates; i++) { if (sc->gates[i].name == NULL) continue; memset(&def, 0, sizeof(def)); def.clkdef.id = i; def.clkdef.name = sc->gates[i].name; def.clkdef.parent_names = &sc->gates[i].parent_name; def.clkdef.parent_cnt = 1; def.offset = sc->gates[i].offset; def.shift = sc->gates[i].shift; def.mask = 1; def.on_value = 1; def.off_value = 0; clknode_gate_register(sc->clkdom, &def); } return (0); } static void aw_ccung_init_clocks(struct aw_ccung_softc *sc) { struct clknode *clknode; int i, error; for (i = 0; i < sc->n_clk_init; i++) { clknode = clknode_find_by_name(sc->clk_init[i].name); if (clknode == NULL) { device_printf(sc->dev, "Cannot find clock %s\n", sc->clk_init[i].name); continue; } if (sc->clk_init[i].parent_name != NULL) { if (bootverbose) device_printf(sc->dev, "Setting %s as parent for %s\n", sc->clk_init[i].parent_name, sc->clk_init[i].name); error = clknode_set_parent_by_name(clknode, sc->clk_init[i].parent_name); if (error != 0) { device_printf(sc->dev, "Cannot set parent to %s for %s\n", sc->clk_init[i].parent_name, sc->clk_init[i].name); continue; } } if (sc->clk_init[i].default_freq != 0) { if (bootverbose) device_printf(sc->dev, "Setting freq %ju for %s\n", sc->clk_init[i].default_freq, sc->clk_init[i].name); error = clknode_set_freq(clknode, sc->clk_init[i].default_freq, 0 , 0); if (error != 0) { device_printf(sc->dev, "Cannot set frequency for %s to %ju\n", sc->clk_init[i].name, sc->clk_init[i].default_freq); continue; } } if (sc->clk_init[i].enable) { error = clknode_enable(clknode); if (error != 0) { device_printf(sc->dev, "Cannot 
enable %s\n", sc->clk_init[i].name); continue; } } } } int aw_ccung_attach(device_t dev) { struct aw_ccung_softc *sc; int i; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, aw_ccung_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) panic("Cannot create clkdom\n"); for (i = 0; i < sc->nclks; i++) { switch (sc->clks[i].type) { case AW_CLK_UNDEFINED: break; case AW_CLK_MUX: clknode_mux_register(sc->clkdom, sc->clks[i].clk.mux); break; case AW_CLK_DIV: clknode_div_register(sc->clkdom, sc->clks[i].clk.div); break; case AW_CLK_FIXED: clknode_fixed_register(sc->clkdom, sc->clks[i].clk.fixed); break; case AW_CLK_NKMP: aw_clk_nkmp_register(sc->clkdom, sc->clks[i].clk.nkmp); break; case AW_CLK_NM: aw_clk_nm_register(sc->clkdom, sc->clks[i].clk.nm); break; case AW_CLK_M: aw_clk_m_register(sc->clkdom, sc->clks[i].clk.m); break; case AW_CLK_PREDIV_MUX: aw_clk_prediv_mux_register(sc->clkdom, sc->clks[i].clk.prediv_mux); break; case AW_CLK_FRAC: aw_clk_frac_register(sc->clkdom, sc->clks[i].clk.frac); break; case AW_CLK_MIPI: aw_clk_mipi_register(sc->clkdom, sc->clks[i].clk.mipi); break; case AW_CLK_NP: aw_clk_np_register(sc->clkdom, sc->clks[i].clk.np); break; case AW_CLK_NMM: aw_clk_nmm_register(sc->clkdom, sc->clks[i].clk.nmm); break; } } if (sc->gates) aw_ccung_register_gates(sc); if (clkdom_finit(sc->clkdom) != 0) panic("cannot finalize clkdom initialization\n"); clkdom_xlock(sc->clkdom); aw_ccung_init_clocks(sc); clkdom_unlock(sc->clkdom); if (bootverbose) clkdom_dump(sc->clkdom); /* If we have resets, register our self as a reset provider */ if (sc->resets) hwreset_register_ofw_provider(dev); return (0); } static device_method_t aw_ccung_methods[] = { /* clkdev interface */ DEVMETHOD(clkdev_write_4, aw_ccung_write_4), DEVMETHOD(clkdev_read_4, aw_ccung_read_4), 
DEVMETHOD(clkdev_modify_4, aw_ccung_modify_4), DEVMETHOD(clkdev_device_lock, aw_ccung_device_lock), DEVMETHOD(clkdev_device_unlock, aw_ccung_device_unlock), /* Reset interface */ DEVMETHOD(hwreset_assert, aw_ccung_reset_assert), DEVMETHOD(hwreset_is_asserted, aw_ccung_reset_is_asserted), DEVMETHOD_END }; DEFINE_CLASS_0(aw_ccung, aw_ccung_driver, aw_ccung_methods, sizeof(struct aw_ccung_softc)); diff --git a/sys/dev/clk/allwinner/aw_ccung.h b/sys/dev/clk/allwinner/aw_ccung.h index 61d99fc3d50f..555a8a95e38a 100644 --- a/sys/dev/clk/allwinner/aw_ccung.h +++ b/sys/dev/clk/allwinner/aw_ccung.h @@ -1,108 +1,108 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef __CCU_NG_H__ #define __CCU_NG_H__ #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include enum aw_ccung_clk_type { AW_CLK_UNDEFINED = 0, AW_CLK_MUX, AW_CLK_DIV, AW_CLK_FIXED, AW_CLK_NKMP, AW_CLK_NM, AW_CLK_PREDIV_MUX, AW_CLK_FRAC, AW_CLK_M, AW_CLK_MIPI, AW_CLK_NP, AW_CLK_NMM, }; struct aw_ccung_clk { enum aw_ccung_clk_type type; union { struct clk_mux_def *mux; struct clk_div_def *div; struct clk_fixed_def *fixed; struct aw_clk_nkmp_def *nkmp; struct aw_clk_nm_def *nm; struct aw_clk_prediv_mux_def *prediv_mux; struct aw_clk_frac_def *frac; struct aw_clk_m_def *m; struct aw_clk_mipi_def *mipi; struct aw_clk_np_def *np; struct aw_clk_nmm_def *nmm; } clk; }; struct aw_ccung_softc { device_t dev; struct resource *res; struct clkdom *clkdom; struct mtx mtx; struct aw_ccung_reset *resets; int nresets; struct aw_ccung_gate *gates; int ngates; struct aw_ccung_clk *clks; int nclks; struct aw_clk_init *clk_init; int n_clk_init; }; struct aw_ccung_reset { uint32_t offset; uint32_t shift; }; struct aw_ccung_gate { const char *name; const char *parent_name; uint32_t id; uint32_t offset; uint32_t shift; }; DECLARE_CLASS(aw_ccung_driver); int aw_ccung_attach(device_t dev); #endif /* __CCU_NG_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_frac.c b/sys/dev/clk/allwinner/aw_clk_frac.c index 696f6c2ebfbf..57249dafbcc2 100644 --- a/sys/dev/clk/allwinner/aw_clk_frac.c +++ b/sys/dev/clk/allwinner/aw_clk_frac.c @@ -1,394 +1,394 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* #define dprintf(format, arg...) printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) */ #define dprintf(format, arg...) 
/* * clknode for clocks matching the formula : * * clk = (24Mhz * n) / m in integer mode * clk = frac_out1 or frac_out2 in fractional mode * */ struct aw_clk_frac_sc { uint32_t offset; struct aw_clk_factor m; struct aw_clk_factor n; struct aw_clk_frac frac; uint64_t min_freq; uint64_t max_freq; uint32_t mux_shift; uint32_t mux_mask; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_frac_init(struct clknode *clk, device_t dev) { struct aw_clk_frac_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); idx = 0; if ((sc->flags & AW_CLK_HAS_MUX) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; } dprintf("init parent idx %d\n", idx); clknode_init_parent_idx(clk, idx); return (0); } static int aw_clk_frac_set_gate(struct clknode *clk, bool enable) { struct aw_clk_frac_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); dprintf("%sabling gate\n", enable ? 
"En" : "Dis"); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int aw_clk_frac_set_mux(struct clknode *clk, int index) { struct aw_clk_frac_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_MUX) == 0) return (0); dprintf("Set mux to %d\n", index); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->mux_mask; val |= index << sc->mux_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_frac_find_best(struct aw_clk_frac_sc *sc, uint64_t fparent, uint64_t fout, uint32_t *factor_n, uint32_t *factor_m) { uint64_t cur, best; uint32_t m, n, max_m, max_n, min_m, min_n; *factor_n = *factor_m = 0; best = cur = 0; max_m = aw_clk_factor_get_max(&sc->m); max_n = aw_clk_factor_get_max(&sc->n); min_m = aw_clk_factor_get_min(&sc->m); min_n = sc->min_freq / fparent; for (n = min_n; n <= max_n; n++) { for (m = min_m; m <= max_m; m++) { cur = fparent * n / m; if (cur < sc->min_freq) { continue; } if (cur > sc->max_freq) { continue; } if (cur == fout) { *factor_n = n; *factor_m = m; return (cur); } if (abs((fout - cur)) < abs((fout - best))) { best = cur; *factor_n = n; *factor_m = m; } } } return (best); } static int aw_clk_frac_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_frac_sc *sc; uint64_t cur, best, best_frac; uint32_t val, m, n, best_m, best_n; int retry, multiple, max_mult, best_mult; sc = clknode_get_softc(clk); best = best_frac = cur = 0; best_mult = 0; max_mult = 1; dprintf("Trying to find freq %ju with parent %ju\n", *fout, fparent); if ((flags & CLK_SET_ROUND_MULTIPLE) != 0) max_mult = 10; for (multiple = 1; multiple <= max_mult; multiple++) { /* First test the fractional frequencies */ dprintf("Testing with multiple %d\n", multiple); if (*fout * multiple == sc->frac.freq0) { best = best_frac 
= sc->frac.freq0; best_mult = multiple; dprintf("Found with using frac.freq0 and multiple %d\n", multiple); break; } else if (*fout * multiple == sc->frac.freq1) { best = best_frac = sc->frac.freq1; best_mult = multiple; dprintf("Found with using frac.freq1 and multiple %d\n", multiple); break; } else { cur = aw_clk_frac_find_best(sc, fparent, *fout * multiple, &n, &m); dprintf("Got %ju with n=%d, m=%d\n", cur, n, m); if (cur == (*fout * multiple)) { best = cur; best_mult = multiple; best_n = n; best_m = m; dprintf("This is the one: n=%d m=%d mult=%d\n", best_n, best_m, best_mult); break; } if (abs(((*fout * multiple) - cur)) < abs(((*fout * multiple) - best))) { best = cur; best_mult = multiple; best_n = n; best_m = m; dprintf("This is the best for now: n=%d m=%d mult=%d\n", best_n, best_m, best_mult); } } } if (best < sc->min_freq || best > sc->max_freq) { printf("%s: Cannot set %ju for %s (min=%ju max=%ju)\n", __func__, best, clknode_get_name(clk), sc->min_freq, sc->max_freq); return (ERANGE); } if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < (*fout * best_mult)) && ((flags & CLK_SET_ROUND_DOWN) == 0)) { *stop = 1; return (ERANGE); } if ((best > *fout * best_mult) && ((flags & CLK_SET_ROUND_UP) == 0)) { *stop = 1; return (ERANGE); } DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); /* Disable clock during freq changes */ val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); if (best_frac != 0) { val &= ~sc->frac.mode_sel; /* M should be 0 per the manual */ val &= ~sc->m.mask; if (best_frac == sc->frac.freq0) val &= ~sc->frac.freq_sel; else val |= sc->frac.freq_sel; } else { val |= sc->frac.mode_sel; /* Select integer mode */ n = aw_clk_factor_get_value(&sc->n, best_n); m = aw_clk_factor_get_value(&sc->m, best_m); val &= ~sc->n.mask; val &= ~sc->m.mask; val |= n << sc->n.shift; val |= m << sc->m.shift; } /* Write the clock changes */ WRITE4(clk, sc->offset, val); /* Enable clock now that we've change it */ val |= 1 << 
sc->gate_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } *fout = best; *stop = 1; return (0); } static int aw_clk_frac_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_frac_sc *sc; uint32_t val, m, n; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); if ((val & sc->frac.mode_sel) == 0) { if (val & sc->frac.freq_sel) *freq = sc->frac.freq1; else *freq = sc->frac.freq0; } else { m = aw_clk_get_factor(val, &sc->m); n = aw_clk_get_factor(val, &sc->n); *freq = *freq * n / m; } return (0); } static clknode_method_t aw_frac_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_frac_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_frac_set_gate), CLKNODEMETHOD(clknode_set_mux, aw_clk_frac_set_mux), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_frac_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_frac_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_frac_clknode, aw_frac_clknode_class, aw_frac_clknode_methods, sizeof(struct aw_clk_frac_sc), clknode_class); int aw_clk_frac_register(struct clkdom *clkdom, struct aw_clk_frac_def *clkdef) { struct clknode *clk; struct aw_clk_frac_sc *sc; clk = clknode_create(clkdom, &aw_frac_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->m.shift = clkdef->m.shift; sc->m.width = clkdef->m.width; sc->m.mask = ((1 << sc->m.width) - 1) << sc->m.shift; sc->m.value = clkdef->m.value; sc->m.flags = clkdef->m.flags; sc->n.shift = clkdef->n.shift; sc->n.width = clkdef->n.width; sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = clkdef->n.flags; sc->frac.freq0 = clkdef->frac.freq0; sc->frac.freq1 = clkdef->frac.freq1; sc->frac.mode_sel = 1 << clkdef->frac.mode_sel; sc->frac.freq_sel = 1 << 
clkdef->frac.freq_sel; sc->min_freq = clkdef->min_freq; sc->max_freq = clkdef->max_freq; sc->mux_shift = clkdef->mux_shift; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_frac.h b/sys/dev/clk/allwinner/aw_clk_frac.h index 8ee59bbebcc1..107c04ba3552 100644 --- a/sys/dev/clk/allwinner/aw_clk_frac.h +++ b/sys/dev/clk/allwinner/aw_clk_frac.h @@ -1,53 +1,53 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef __AW_CLK_FRAC_H__ #define __AW_CLK_FRAC_H__ -#include +#include struct aw_clk_frac_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor m; struct aw_clk_factor n; struct aw_clk_frac frac; uint64_t min_freq; uint64_t max_freq; uint32_t mux_shift; uint32_t mux_width; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; int aw_clk_frac_register(struct clkdom *clkdom, struct aw_clk_frac_def *clkdef); #endif /* __AW_CLK_FRAC_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_m.c b/sys/dev/clk/allwinner/aw_clk_m.c index d57434617d23..5d7dd971cc7a 100644 --- a/sys/dev/clk/allwinner/aw_clk_m.c +++ b/sys/dev/clk/allwinner/aw_clk_m.c @@ -1,284 +1,284 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin / m * And that needs to potentially : * 1) Set the parent freq * 2) Support Setting the parent to a multiple * */ struct aw_clk_m_sc { uint32_t offset; struct aw_clk_factor m; uint32_t mux_shift; uint32_t mux_mask; uint32_t gate_shift; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_m_init(struct clknode *clk, device_t dev) { struct aw_clk_m_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); idx = 0; if ((sc->flags & AW_CLK_HAS_MUX) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; } clknode_init_parent_idx(clk, idx); return (0); } static int aw_clk_m_set_gate(struct clknode *clk, bool enable) { struct aw_clk_m_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int aw_clk_m_set_mux(struct clknode *clk, int index) { struct aw_clk_m_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_MUX) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->mux_mask; val |= index << sc->mux_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_m_find_best(struct aw_clk_m_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_m) { uint64_t cur, best = 0; uint32_t m, max_m, min_m; *factor_m = 0; max_m = 
aw_clk_factor_get_max(&sc->m); min_m = aw_clk_factor_get_min(&sc->m); for (m = min_m; m <= max_m; ) { cur = fparent / m; if (abs(*fout - cur) < abs(*fout - best)) { best = cur; *factor_m = m; } if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) m <<= 1; else m++; } return (best); } static int aw_clk_m_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_m_sc *sc; struct clknode *p_clk; uint64_t cur, best; uint32_t val, m, best_m; sc = clknode_get_softc(clk); best = cur = 0; best = aw_clk_m_find_best(sc, fparent, fout, &best_m); if ((best != *fout) && ((sc->flags & AW_CLK_SET_PARENT) != 0)) { p_clk = clknode_get_parent(clk); if (p_clk == NULL) { printf("%s: Cannot get parent for clock %s\n", __func__, clknode_get_name(clk)); return (ENXIO); } clknode_set_freq(p_clk, *fout, CLK_SET_ROUND_MULTIPLE, 0); clknode_get_freq(p_clk, &fparent); best = aw_clk_m_find_best(sc, fparent, fout, &best_m); } if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0)) { *stop = 1; return (ERANGE); } if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) { *stop = 1; return (ERANGE); } DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); m = aw_clk_factor_get_value(&sc->m, best_m); val &= ~sc->m.mask; val |= m << sc->m.shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); *fout = best; *stop = 1; return (0); } static int aw_clk_m_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_m_sc *sc; uint32_t val, m; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); m = aw_clk_get_factor(val, &sc->m); *freq = *freq / m; return (0); } static clknode_method_t aw_m_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_m_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_m_set_gate), CLKNODEMETHOD(clknode_set_mux, aw_clk_m_set_mux), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_m_recalc), 
CLKNODEMETHOD(clknode_set_freq, aw_clk_m_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_m_clknode, aw_m_clknode_class, aw_m_clknode_methods, sizeof(struct aw_clk_m_sc), clknode_class); int aw_clk_m_register(struct clkdom *clkdom, struct aw_clk_m_def *clkdef) { struct clknode *clk; struct aw_clk_m_sc *sc; clk = clknode_create(clkdom, &aw_m_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->m.shift = clkdef->m.shift; sc->m.width = clkdef->m.width; sc->m.mask = ((1 << sc->m.width) - 1) << sc->m.shift; sc->m.value = clkdef->m.value; sc->m.flags = clkdef->m.flags; sc->mux_shift = clkdef->mux_shift; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->gate_shift = clkdef->gate_shift; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_m.h b/sys/dev/clk/allwinner/aw_clk_m.h index 45a231bfdb37..a2a9b0664662 100644 --- a/sys/dev/clk/allwinner/aw_clk_m.h +++ b/sys/dev/clk/allwinner/aw_clk_m.h @@ -1,46 +1,46 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __AW_CLK_M_H__ #define __AW_CLK_M_H__ -#include +#include struct aw_clk_m_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor m; uint32_t mux_shift; uint32_t mux_width; uint32_t gate_shift; uint32_t flags; }; int aw_clk_m_register(struct clkdom *clkdom, struct aw_clk_m_def *clkdef); #endif /* __AW_CLK_M_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_mipi.c b/sys/dev/clk/allwinner/aw_clk_mipi.c index e57b5347994a..9875e179a277 100644 --- a/sys/dev/clk/allwinner/aw_clk_mipi.c +++ b/sys/dev/clk/allwinner/aw_clk_mipi.c @@ -1,289 +1,289 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* #define dprintf(format, arg...) printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) */ #define dprintf(format, arg...) /* * clknode for PLL_MIPI : * * clk = (pll_video0 * n * k) / m when vfb_sel=0 * clk depend on sint_frac, sdiv2, s6p25_7p5, pll_feedback_div when vfb_sel=1 * */ struct aw_clk_mipi_sc { uint32_t offset; struct aw_clk_factor k; struct aw_clk_factor m; struct aw_clk_factor n; uint64_t min_freq; uint64_t max_freq; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define LDO1_EN_SHIFT 23 #define LDO2_EN_SHIFT 22 #define VFB_SEL_SHIFT 16 static int aw_clk_mipi_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int aw_clk_mipi_set_gate(struct clknode *clk, bool enable) { struct aw_clk_mipi_sc *sc; uint32_t val; sc = clknode_get_softc(clk); dprintf("%sabling gate\n", enable ? 
"En" : "Dis"); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) { val |= (1 << sc->gate_shift); val |= (1 << LDO1_EN_SHIFT); val |= (1 << LDO2_EN_SHIFT); } else { val &= ~(1 << sc->gate_shift); val &= ~(1 << LDO1_EN_SHIFT); val &= ~(1 << LDO2_EN_SHIFT); } WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_mipi_find_best(struct aw_clk_mipi_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_k, uint32_t *factor_m, uint32_t *factor_n) { uint64_t cur, best; uint32_t n, k, m; best = 0; *factor_n = 0; *factor_k = 0; *factor_m = 0; for (n = aw_clk_factor_get_min(&sc->n); n <= aw_clk_factor_get_max(&sc->n); n++) { for (k = aw_clk_factor_get_min(&sc->k); k <= aw_clk_factor_get_max(&sc->k); k++) { for (m = aw_clk_factor_get_min(&sc->m); m <= aw_clk_factor_get_max(&sc->m); m++) { cur = (fparent * n * k) / m; if ((*fout - cur) < (*fout - best)) { best = cur; *factor_n = n; *factor_k = k; *factor_m = m; } if (best == *fout) return (best); } } } return best; } static int aw_clk_mipi_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_mipi_sc *sc; uint64_t best = 0; uint32_t best_k, best_m, best_n; uint32_t k, m, n; uint32_t val; uint32_t retry; sc = clknode_get_softc(clk); best = aw_clk_mipi_find_best(sc, fparent, fout, &best_k, &best_m, &best_n); if (best < sc->min_freq || best > sc->max_freq) { printf("%s: Cannot set %ju for %s (min=%ju max=%ju)\n", __func__, best, clknode_get_name(clk), sc->min_freq, sc->max_freq); return (ERANGE); } if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); /* Disable clock during freq changes */ val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); k = aw_clk_factor_get_value(&sc->k, best_k); n = aw_clk_factor_get_value(&sc->n, best_n); m = aw_clk_factor_get_value(&sc->m, best_m); val &= ~sc->k.mask; val &= ~sc->m.mask; val &= ~sc->n.mask; val |= k << sc->k.shift; val 
|= m << sc->m.shift; val |= n << sc->n.shift; /* Write the clock changes */ WRITE4(clk, sc->offset, val); /* Enable clock now that we've change it */ val |= 1 << sc->gate_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } *fout = best; *stop = 1; return (0); } static int aw_clk_mipi_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_mipi_sc *sc; uint32_t val, m, n, k; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); k = aw_clk_get_factor(val, &sc->k); m = aw_clk_get_factor(val, &sc->m); n = aw_clk_get_factor(val, &sc->n); *freq = (*freq * n * k) / m; return (0); } static clknode_method_t aw_mipi_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_mipi_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_mipi_set_gate), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_mipi_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_mipi_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_mipi_clknode, aw_mipi_clknode_class, aw_mipi_clknode_methods, sizeof(struct aw_clk_mipi_sc), clknode_class); int aw_clk_mipi_register(struct clkdom *clkdom, struct aw_clk_mipi_def *clkdef) { struct clknode *clk; struct aw_clk_mipi_sc *sc; clk = clknode_create(clkdom, &aw_mipi_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->k.shift = clkdef->k.shift; sc->k.width = clkdef->k.width; sc->k.mask = ((1 << sc->k.width) - 1) << sc->k.shift; sc->k.value = clkdef->k.value; sc->k.flags = clkdef->k.flags; sc->k.min_value = clkdef->k.min_value; sc->m.shift = clkdef->m.shift; sc->m.width = clkdef->m.width; sc->m.mask = ((1 << sc->m.width) - 1) << sc->m.shift; sc->m.value = clkdef->m.value; sc->m.flags = clkdef->m.flags; sc->m.min_value = clkdef->m.min_value; sc->n.shift = clkdef->n.shift; sc->n.width = 
clkdef->n.width; sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = clkdef->n.flags; sc->n.min_value = clkdef->n.min_value; sc->min_freq = clkdef->min_freq; sc->max_freq = clkdef->max_freq; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_mipi.h b/sys/dev/clk/allwinner/aw_clk_mipi.h index bc28c0fa685d..81d2bd1bd64c 100644 --- a/sys/dev/clk/allwinner/aw_clk_mipi.h +++ b/sys/dev/clk/allwinner/aw_clk_mipi.h @@ -1,51 +1,51 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef __AW_CLK_MIPI_H__ #define __AW_CLK_MIPI_H__ -#include +#include struct aw_clk_mipi_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor k; struct aw_clk_factor m; struct aw_clk_factor n; uint64_t min_freq; uint64_t max_freq; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; int aw_clk_mipi_register(struct clkdom *clkdom, struct aw_clk_mipi_def *clkdef); #endif /* __AW_CLK_MIPI_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_nkmp.c b/sys/dev/clk/allwinner/aw_clk_nkmp.c index 73bf6a2fafe3..ede89c1d1b8b 100644 --- a/sys/dev/clk/allwinner/aw_clk_nkmp.c +++ b/sys/dev/clk/allwinner/aw_clk_nkmp.c @@ -1,407 +1,407 @@ /*- * Copyright (c) 2017 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = (clkin * n * k) / (m * p) * */ struct aw_clk_nkmp_sc { uint32_t offset; struct aw_clk_factor n; struct aw_clk_factor k; struct aw_clk_factor m; struct aw_clk_factor p; uint32_t mux_shift; uint32_t mux_mask; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t update_shift; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MODIFY4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_nkmp_init(struct clknode *clk, device_t dev) { struct aw_clk_nkmp_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); idx = 0; if ((sc->flags & AW_CLK_HAS_MUX) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; } clknode_init_parent_idx(clk, idx); return (0); } static int aw_clk_nkmp_set_gate(struct clknode *clk, bool enable) { struct aw_clk_nkmp_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int aw_clk_nkmp_set_mux(struct clknode *clk, int index) { struct aw_clk_nkmp_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_MUX) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->mux_mask; val |= index << sc->mux_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t 
aw_clk_nkmp_find_best(struct aw_clk_nkmp_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_n, uint32_t *factor_k, uint32_t *factor_m, uint32_t *factor_p) { uint64_t cur, best; uint32_t n, k, m, p; best = 0; *factor_n = 0; *factor_k = 0; *factor_m = 0; *factor_p = 0; for (n = aw_clk_factor_get_min(&sc->n); n <= aw_clk_factor_get_max(&sc->n); ) { for (k = aw_clk_factor_get_min(&sc->k); k <= aw_clk_factor_get_max(&sc->k); ) { for (m = aw_clk_factor_get_min(&sc->m); m <= aw_clk_factor_get_max(&sc->m); ) { for (p = aw_clk_factor_get_min(&sc->p); p <= aw_clk_factor_get_max(&sc->p); ) { cur = (fparent * n * k) / (m * p); if ((*fout - cur) < (*fout - best)) { best = cur; *factor_n = n; *factor_k = k; *factor_m = m; *factor_p = p; } if (best == *fout) return (best); if ((sc->p.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) p <<= 1; else p++; } if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) m <<= 1; else m++; } if ((sc->k.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) k <<= 1; else k++; } if ((sc->n.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) n <<= 1; else n++; } return best; } static void aw_clk_nkmp_set_freq_scale(struct clknode *clk, struct aw_clk_nkmp_sc *sc, uint32_t factor_n, uint32_t factor_k, uint32_t factor_m, uint32_t factor_p) { uint32_t val, m, p; int retry; DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); m = aw_clk_get_factor(val, &sc->m); p = aw_clk_get_factor(val, &sc->p); if (p < factor_p) { val &= ~sc->p.mask; val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift; WRITE4(clk, sc->offset, val); DELAY(2000); } if (m < factor_m) { val &= ~sc->m.mask; val |= aw_clk_factor_get_value(&sc->m, factor_m) << sc->m.shift; WRITE4(clk, sc->offset, val); DELAY(2000); } val &= ~sc->n.mask; val &= ~sc->k.mask; val |= aw_clk_factor_get_value(&sc->n, factor_n) << sc->n.shift; val |= aw_clk_factor_get_value(&sc->k, factor_k) << sc->k.shift; WRITE4(clk, sc->offset, val); DELAY(2000); if (m > factor_m) { val &= ~sc->m.mask; val |= aw_clk_factor_get_value(&sc->m, 
factor_m) << sc->m.shift; WRITE4(clk, sc->offset, val); DELAY(2000); } if (p > factor_p) { val &= ~sc->p.mask; val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift; WRITE4(clk, sc->offset, val); DELAY(2000); } if ((sc->flags & AW_CLK_HAS_LOCK) != 0) { for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } } DEVICE_UNLOCK(clk); } static int aw_clk_nkmp_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_nkmp_sc *sc; uint64_t best; uint32_t val, best_n, best_k, best_m, best_p; int retry; sc = clknode_get_softc(clk); best = aw_clk_nkmp_find_best(sc, fparent, fout, &best_n, &best_k, &best_m, &best_p); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) != 0)) { *stop = 1; return (ERANGE); } if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) != 0)) { *stop = 1; return (ERANGE); } if ((sc->flags & AW_CLK_SCALE_CHANGE) != 0) aw_clk_nkmp_set_freq_scale(clk, sc, best_n, best_k, best_m, best_p); else { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->n.mask; val &= ~sc->k.mask; val &= ~sc->m.mask; val &= ~sc->p.mask; val |= aw_clk_factor_get_value(&sc->n, best_n) << sc->n.shift; val |= aw_clk_factor_get_value(&sc->k, best_k) << sc->k.shift; val |= aw_clk_factor_get_value(&sc->m, best_m) << sc->m.shift; val |= aw_clk_factor_get_value(&sc->p, best_p) << sc->p.shift; WRITE4(clk, sc->offset, val); DELAY(2000); DEVICE_UNLOCK(clk); if ((sc->flags & AW_CLK_HAS_UPDATE) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val |= 1 << sc->update_shift; WRITE4(clk, sc->offset, val); DELAY(2000); DEVICE_UNLOCK(clk); } if ((sc->flags & AW_CLK_HAS_LOCK) != 0) { for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } } } *fout = best; *stop = 1; return (0); } static 
int aw_clk_nkmp_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_nkmp_sc *sc; uint32_t val, m, n, k, p; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); n = aw_clk_get_factor(val, &sc->n); k = aw_clk_get_factor(val, &sc->k); m = aw_clk_get_factor(val, &sc->m); p = aw_clk_get_factor(val, &sc->p); *freq = (*freq * n * k) / (m * p); return (0); } static clknode_method_t aw_nkmp_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_nkmp_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_nkmp_set_gate), CLKNODEMETHOD(clknode_set_mux, aw_clk_nkmp_set_mux), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_nkmp_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_nkmp_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_nkmp_clknode, aw_nkmp_clknode_class, aw_nkmp_clknode_methods, sizeof(struct aw_clk_nkmp_sc), clknode_class); int aw_clk_nkmp_register(struct clkdom *clkdom, struct aw_clk_nkmp_def *clkdef) { struct clknode *clk; struct aw_clk_nkmp_sc *sc; clk = clknode_create(clkdom, &aw_nkmp_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->n.shift = clkdef->n.shift; sc->n.width = clkdef->n.width; sc->n.mask = ((1 << clkdef->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = clkdef->n.flags; sc->k.shift = clkdef->k.shift; sc->k.width = clkdef->k.width; sc->k.mask = ((1 << clkdef->k.width) - 1) << sc->k.shift; sc->k.value = clkdef->k.value; sc->k.flags = clkdef->k.flags; sc->m.shift = clkdef->m.shift; sc->m.width = clkdef->m.width; sc->m.mask = ((1 << clkdef->m.width) - 1) << sc->m.shift; sc->m.value = clkdef->m.value; sc->m.flags = clkdef->m.flags; sc->p.shift = clkdef->p.shift; sc->p.width = clkdef->p.width; sc->p.mask = ((1 << clkdef->p.width) - 1) << sc->p.shift; sc->p.value = clkdef->p.value; sc->p.flags = clkdef->p.flags; sc->mux_shift = clkdef->mux_shift; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << 
sc->mux_shift; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->update_shift = clkdef->update_shift; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_nm.c b/sys/dev/clk/allwinner/aw_clk_nm.c index 5692ce2c8b99..cb49adda8597 100644 --- a/sys/dev/clk/allwinner/aw_clk_nm.c +++ b/sys/dev/clk/allwinner/aw_clk_nm.c @@ -1,352 +1,352 @@ /*- * Copyright (c) 2017 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin / n / m * */ struct aw_clk_nm_sc { uint32_t offset; struct aw_clk_factor m; struct aw_clk_factor n; struct aw_clk_factor prediv; uint32_t mux_shift; uint32_t mux_mask; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_nm_init(struct clknode *clk, device_t dev) { struct aw_clk_nm_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); idx = 0; if ((sc->flags & AW_CLK_HAS_MUX) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; } clknode_init_parent_idx(clk, idx); return (0); } static int aw_clk_nm_set_gate(struct clknode *clk, bool enable) { struct aw_clk_nm_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int aw_clk_nm_set_mux(struct clknode *clk, int index) { struct aw_clk_nm_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_MUX) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->mux_mask; val |= index << sc->mux_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_nm_find_best(struct aw_clk_nm_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_n, uint32_t *factor_m) { uint64_t cur, best = 0; uint32_t m, n, max_m, max_n, min_m, min_n; 
*factor_n = *factor_m = 0; max_m = aw_clk_factor_get_max(&sc->m); max_n = aw_clk_factor_get_max(&sc->n); min_m = aw_clk_factor_get_min(&sc->m); min_n = aw_clk_factor_get_min(&sc->n); for (m = min_m; m <= max_m; ) { for (n = min_n; n <= max_n; ) { cur = fparent / n / m; if (clk_freq_diff(*fout, cur) < clk_freq_diff(*fout, best)) { best = cur; *factor_n = n; *factor_m = m; } if ((sc->n.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) n <<= 1; else n++; } if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0) m <<= 1; else m++; } return (best); } static int aw_clk_nm_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_nm_sc *sc; struct clknode *p_clk; const char **p_names; uint64_t cur, best; uint32_t val, m, n, best_m, best_n; int p_idx, best_parent, retry; sc = clknode_get_softc(clk); best = cur = 0; best_parent = 0; if ((sc->flags & AW_CLK_REPARENT) != 0) { p_names = clknode_get_parent_names(clk); for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) { p_clk = clknode_find_by_name(p_names[p_idx]); clknode_get_freq(p_clk, &fparent); cur = aw_clk_nm_find_best(sc, fparent, fout, &n, &m); if (clk_freq_diff(*fout, cur) < clk_freq_diff(*fout, best)) { best = cur; best_parent = p_idx; best_n = n; best_m = m; } } p_idx = clknode_get_parent_idx(clk); p_clk = clknode_get_parent(clk); clknode_get_freq(p_clk, &fparent); } else { best = aw_clk_nm_find_best(sc, fparent, fout, &best_n, &best_m); } if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0)) { *stop = 1; printf("best freq (%ju) < requested freq(%ju)\n", best, *fout); return (ERANGE); } if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) { *stop = 1; printf("best freq (%ju) > requested freq(%ju)\n", best, *fout); return (ERANGE); } if ((sc->flags & AW_CLK_REPARENT) != 0 && p_idx != best_parent) clknode_set_parent_by_idx(clk, best_parent); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); n 
= aw_clk_factor_get_value(&sc->n, best_n); m = aw_clk_factor_get_value(&sc->m, best_m); val &= ~sc->n.mask; val &= ~sc->m.mask; val |= n << sc->n.shift; val |= m << sc->m.shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); if ((sc->flags & AW_CLK_HAS_LOCK) != 0) { for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } } *fout = best; *stop = 1; return (0); } static int aw_clk_nm_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_nm_sc *sc; uint32_t val, m, n, prediv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); m = aw_clk_get_factor(val, &sc->m); n = aw_clk_get_factor(val, &sc->n); if (sc->flags & AW_CLK_HAS_PREDIV) prediv = aw_clk_get_factor(val, &sc->prediv); else prediv = 1; *freq = *freq / prediv / n / m; return (0); } static clknode_method_t aw_nm_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_nm_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_nm_set_gate), CLKNODEMETHOD(clknode_set_mux, aw_clk_nm_set_mux), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_nm_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_nm_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_nm_clknode, aw_nm_clknode_class, aw_nm_clknode_methods, sizeof(struct aw_clk_nm_sc), clknode_class); int aw_clk_nm_register(struct clkdom *clkdom, struct aw_clk_nm_def *clkdef) { struct clknode *clk; struct aw_clk_nm_sc *sc; clk = clknode_create(clkdom, &aw_nm_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->m.shift = clkdef->m.shift; sc->m.width = clkdef->m.width; sc->m.mask = ((1 << sc->m.width) - 1) << sc->m.shift; sc->m.value = clkdef->m.value; sc->m.flags = clkdef->m.flags; sc->n.shift = clkdef->n.shift; sc->n.width = clkdef->n.width; sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = clkdef->n.flags; 
sc->prediv.shift = clkdef->prediv.shift; sc->prediv.width = clkdef->prediv.width; sc->prediv.mask = ((1 << sc->prediv.width) - 1) << sc->prediv.shift; sc->prediv.value = clkdef->prediv.value; sc->prediv.flags = clkdef->prediv.flags; sc->prediv.cond_shift = clkdef->prediv.cond_shift; if (clkdef->prediv.cond_width != 0) sc->prediv.cond_mask = ((1 << clkdef->prediv.cond_width) - 1) << sc->prediv.shift; else sc->prediv.cond_mask = clkdef->prediv.cond_mask; sc->prediv.cond_value = clkdef->prediv.cond_value; sc->mux_shift = clkdef->mux_shift; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_nm.h b/sys/dev/clk/allwinner/aw_clk_nm.h index 10c37cbf6ec7..7fbdc3c49002 100644 --- a/sys/dev/clk/allwinner/aw_clk_nm.h +++ b/sys/dev/clk/allwinner/aw_clk_nm.h @@ -1,50 +1,50 @@ /*- * Copyright (c) 2017 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __AW_CLK_NM_H__ #define __AW_CLK_NM_H__ -#include +#include struct aw_clk_nm_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor m; struct aw_clk_factor n; struct aw_clk_factor prediv; uint32_t mux_shift; uint32_t mux_width; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; int aw_clk_nm_register(struct clkdom *clkdom, struct aw_clk_nm_def *clkdef); #endif /* __AW_CLK_NM_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_nmm.c b/sys/dev/clk/allwinner/aw_clk_nmm.c index 754c313271cb..ac55c5eea182 100644 --- a/sys/dev/clk/allwinner/aw_clk_nmm.c +++ b/sys/dev/clk/allwinner/aw_clk_nmm.c @@ -1,277 +1,277 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin * n / m0 / m1 * */ struct aw_clk_nmm_sc { uint32_t offset; struct aw_clk_factor n; struct aw_clk_factor m0; struct aw_clk_factor m1; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_nmm_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int aw_clk_nmm_set_gate(struct clknode *clk, bool enable) { struct aw_clk_nmm_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_nmm_find_best(struct aw_clk_nmm_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_n, uint32_t *factor_m0, uint32_t *factor_m1) { uint64_t cur, best; uint32_t n, m0, m1; uint32_t max_n, max_m0, max_m1; uint32_t min_n, min_m0, min_m1; *factor_n = *factor_m0 = *factor_m1 = 0; max_n = 
aw_clk_factor_get_max(&sc->n); min_n = aw_clk_factor_get_min(&sc->n); max_m0 = aw_clk_factor_get_max(&sc->m0); min_m0 = aw_clk_factor_get_min(&sc->m0); max_m1 = aw_clk_factor_get_max(&sc->m1); min_m1 = aw_clk_factor_get_min(&sc->m1); for (m0 = min_m0; m0 <= max_m0; ) { for (m1 = min_m1; m1 <= max_m1; ) { for (n = min_n; n <= max_n; ) { cur = fparent * n / m0 / m1; if (abs(*fout - cur) < abs(*fout - best)) { best = cur; *factor_n = n; *factor_m0 = m0; *factor_m1 = m1; } n++; } m1++; } m0++; } return (best); } static int aw_clk_nmm_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_nmm_sc *sc; uint64_t cur, best; uint32_t val, n, m0, m1, best_n, best_m0, best_m1; int retry; sc = clknode_get_softc(clk); best = cur = 0; best = aw_clk_nmm_find_best(sc, fparent, fout, &best_n, &best_m0, &best_m1); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0)) { *stop = 1; return (ERANGE); } if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) { *stop = 1; return (ERANGE); } DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); n = aw_clk_factor_get_value(&sc->n, best_n); m0 = aw_clk_factor_get_value(&sc->m0, best_m0); m1 = aw_clk_factor_get_value(&sc->m1, best_m1); val &= ~sc->n.mask; val &= ~sc->m0.mask; val &= ~sc->m1.mask; val |= n << sc->n.shift; val |= m0 << sc->m0.shift; val |= m1 << sc->m1.shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); if ((sc->flags & AW_CLK_HAS_LOCK) != 0) { for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } } *fout = best; *stop = 1; return (0); } static int aw_clk_nmm_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_nmm_sc *sc; uint32_t val, n, m0, m1; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); n = aw_clk_get_factor(val, &sc->n); m0 = aw_clk_get_factor(val, 
&sc->m0); m1 = aw_clk_get_factor(val, &sc->m1); *freq = *freq * n / m0 / m1; return (0); } static clknode_method_t aw_nmm_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_nmm_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_nmm_set_gate), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_nmm_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_nmm_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_nmm_clknode, aw_nmm_clknode_class, aw_nmm_clknode_methods, sizeof(struct aw_clk_nmm_sc), clknode_class); int aw_clk_nmm_register(struct clkdom *clkdom, struct aw_clk_nmm_def *clkdef) { struct clknode *clk; struct aw_clk_nmm_sc *sc; clk = clknode_create(clkdom, &aw_nmm_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->n.shift = clkdef->n.shift; sc->n.width = clkdef->n.width; sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = clkdef->n.flags; sc->m0.shift = clkdef->m0.shift; sc->m0.width = clkdef->m0.width; sc->m0.mask = ((1 << sc->m0.width) - 1) << sc->m0.shift; sc->m0.value = clkdef->m0.value; sc->m0.flags = clkdef->m0.flags; sc->m1.shift = clkdef->m1.shift; sc->m1.width = clkdef->m1.width; sc->m1.mask = ((1 << sc->m1.width) - 1) << sc->m1.shift; sc->m1.value = clkdef->m1.value; sc->m1.flags = clkdef->m1.flags; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_nmm.h b/sys/dev/clk/allwinner/aw_clk_nmm.h index 7b95ec3e23c8..7100ee72ef4e 100644 --- a/sys/dev/clk/allwinner/aw_clk_nmm.h +++ b/sys/dev/clk/allwinner/aw_clk_nmm.h @@ -1,50 +1,50 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are 
met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __AW_CLK_NMM_H__ #define __AW_CLK_NMM_H__ -#include +#include struct aw_clk_nmm_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor n; struct aw_clk_factor m0; struct aw_clk_factor m1; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; int aw_clk_nmm_register(struct clkdom *clkdom, struct aw_clk_nmm_def *clkdef); #endif /* __AW_CLK_NMM_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_np.c b/sys/dev/clk/allwinner/aw_clk_np.c index a06b0fa16796..bca81eb3e937 100644 --- a/sys/dev/clk/allwinner/aw_clk_np.c +++ b/sys/dev/clk/allwinner/aw_clk_np.c @@ -1,259 +1,259 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin * n / p * */ struct aw_clk_np_sc { uint32_t offset; struct aw_clk_factor n; struct aw_clk_factor p; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_np_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int aw_clk_np_set_gate(struct clknode *clk, bool enable) { struct aw_clk_np_sc *sc; uint32_t val; sc = clknode_get_softc(clk); if ((sc->flags & AW_CLK_HAS_GATE) == 0) return (0); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); if (enable) val |= (1 << sc->gate_shift); else val &= ~(1 << sc->gate_shift); WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static uint64_t aw_clk_np_find_best(struct aw_clk_np_sc *sc, uint64_t fparent, uint64_t *fout, uint32_t *factor_n, uint32_t *factor_p) { uint64_t cur, best; uint32_t n, p, max_n, max_p, min_n, min_p; *factor_n = *factor_p = 0; max_n = aw_clk_factor_get_max(&sc->n); max_p = aw_clk_factor_get_max(&sc->p); min_n = aw_clk_factor_get_min(&sc->n); min_p = aw_clk_factor_get_min(&sc->p); for (p = min_p; p <= max_p; ) { for (n = min_n; n <= max_n; ) { cur = fparent * n / p; if (abs(*fout - cur) < abs(*fout - best)) { best = cur; *factor_n = n; *factor_p = p; } n++; } p++; } return (best); } static int aw_clk_np_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct aw_clk_np_sc *sc; uint64_t cur, best; uint32_t val, n, p, best_n, best_p; int retry; sc = clknode_get_softc(clk); best = cur = 0; best = 
aw_clk_np_find_best(sc, fparent, fout, &best_n, &best_p); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0)) { *stop = 1; return (ERANGE); } if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) { *stop = 1; return (ERANGE); } DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); n = aw_clk_factor_get_value(&sc->n, best_n); p = aw_clk_factor_get_value(&sc->p, best_p); val &= ~sc->n.mask; val &= ~sc->p.mask; val |= n << sc->n.shift; val |= p << sc->p.shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); if ((sc->flags & AW_CLK_HAS_LOCK) != 0) { for (retry = 0; retry < sc->lock_retries; retry++) { READ4(clk, sc->offset, &val); if ((val & (1 << sc->lock_shift)) != 0) break; DELAY(1000); } } *fout = best; *stop = 1; return (0); } static int aw_clk_np_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_np_sc *sc; uint32_t val, n, p; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); n = aw_clk_get_factor(val, &sc->n); p = aw_clk_get_factor(val, &sc->p); *freq = *freq * n / p; return (0); } static clknode_method_t aw_np_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_np_init), CLKNODEMETHOD(clknode_set_gate, aw_clk_np_set_gate), CLKNODEMETHOD(clknode_recalc_freq, aw_clk_np_recalc), CLKNODEMETHOD(clknode_set_freq, aw_clk_np_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_np_clknode, aw_np_clknode_class, aw_np_clknode_methods, sizeof(struct aw_clk_np_sc), clknode_class); int aw_clk_np_register(struct clkdom *clkdom, struct aw_clk_np_def *clkdef) { struct clknode *clk; struct aw_clk_np_sc *sc; clk = clknode_create(clkdom, &aw_np_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->n.shift = clkdef->n.shift; sc->n.width = clkdef->n.width; sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift; sc->n.value = clkdef->n.value; sc->n.flags = 
clkdef->n.flags; sc->p.shift = clkdef->p.shift; sc->p.width = clkdef->p.width; sc->p.mask = ((1 << sc->p.width) - 1) << sc->p.shift; sc->p.value = clkdef->p.value; sc->p.flags = clkdef->p.flags; sc->gate_shift = clkdef->gate_shift; sc->lock_shift = clkdef->lock_shift; sc->lock_retries = clkdef->lock_retries; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/aw_clk_np.h b/sys/dev/clk/allwinner/aw_clk_np.h index d91bcd0dbb1f..bffa61cafe1a 100644 --- a/sys/dev/clk/allwinner/aw_clk_np.h +++ b/sys/dev/clk/allwinner/aw_clk_np.h @@ -1,49 +1,49 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef __AW_CLK_NP_H__ #define __AW_CLK_NP_H__ -#include +#include struct aw_clk_np_def { struct clknode_init_def clkdef; uint32_t offset; struct aw_clk_factor n; struct aw_clk_factor p; uint32_t gate_shift; uint32_t lock_shift; uint32_t lock_retries; uint32_t flags; }; int aw_clk_np_register(struct clkdom *clkdom, struct aw_clk_np_def *clkdef); #endif /* __AW_CLK_NP_H__ */ diff --git a/sys/dev/clk/allwinner/aw_clk_prediv_mux.c b/sys/dev/clk/allwinner/aw_clk_prediv_mux.c index 3a64726ca776..5bcc06b37c8f 100644 --- a/sys/dev/clk/allwinner/aw_clk_prediv_mux.c +++ b/sys/dev/clk/allwinner/aw_clk_prediv_mux.c @@ -1,178 +1,178 @@ /*- * Copyright (c) 2017 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include #include "clkdev_if.h" /* * clknode for clocks matching the formula : * * clk = clkin / prediv / div * * and where prediv is conditional * */ struct aw_clk_prediv_mux_sc { uint32_t offset; uint32_t mux_shift; uint32_t mux_mask; struct aw_clk_factor div; struct aw_clk_factor prediv; uint32_t flags; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MODIFY4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int aw_clk_prediv_mux_init(struct clknode *clk, device_t dev) { struct aw_clk_prediv_mux_sc *sc; uint32_t val; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); /* Init the current parent */ val = (val & sc->mux_mask) >> sc->mux_shift; clknode_init_parent_idx(clk, val); return (0); } static int aw_clk_prediv_mux_set_mux(struct clknode *clk, int index) { struct aw_clk_prediv_mux_sc *sc; uint32_t val; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); val &= ~sc->mux_mask; val |= index << sc->mux_shift; WRITE4(clk, sc->offset, val); DEVICE_UNLOCK(clk); return (0); } static int aw_clk_prediv_mux_recalc(struct clknode *clk, uint64_t *freq) { struct aw_clk_prediv_mux_sc *sc; uint32_t val, div, prediv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->offset, &val); DEVICE_UNLOCK(clk); div = aw_clk_get_factor(val, &sc->div); prediv = aw_clk_get_factor(val, &sc->prediv); *freq = *freq / prediv / div; return (0); } static clknode_method_t aw_prediv_mux_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, aw_clk_prediv_mux_init), CLKNODEMETHOD(clknode_set_mux, aw_clk_prediv_mux_set_mux), 
CLKNODEMETHOD(clknode_recalc_freq, aw_clk_prediv_mux_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(aw_prediv_mux_clknode, aw_prediv_mux_clknode_class, aw_prediv_mux_clknode_methods, sizeof(struct aw_clk_prediv_mux_sc), clknode_class); int aw_clk_prediv_mux_register(struct clkdom *clkdom, struct aw_clk_prediv_mux_def *clkdef) { struct clknode *clk; struct aw_clk_prediv_mux_sc *sc; clk = clknode_create(clkdom, &aw_prediv_mux_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->mux_shift = clkdef->mux_shift; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->div.shift = clkdef->div.shift; sc->div.mask = ((1 << clkdef->div.width) - 1) << sc->div.shift; sc->div.value = clkdef->div.value; sc->div.cond_shift = clkdef->div.cond_shift; sc->div.cond_mask = ((1 << clkdef->div.cond_width) - 1) << sc->div.shift; sc->div.cond_value = clkdef->div.cond_value; sc->div.flags = clkdef->div.flags; sc->prediv.shift = clkdef->prediv.shift; sc->prediv.mask = ((1 << clkdef->prediv.width) - 1) << sc->prediv.shift; sc->prediv.value = clkdef->prediv.value; sc->prediv.cond_shift = clkdef->prediv.cond_shift; if (clkdef->prediv.cond_width != 0) sc->prediv.cond_mask = ((1 << clkdef->prediv.cond_width) - 1) << sc->prediv.shift; else sc->prediv.cond_mask = clkdef->prediv.cond_mask; sc->prediv.cond_value = clkdef->prediv.cond_value; sc->prediv.flags = clkdef->prediv.flags; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/allwinner/ccu_a10.c b/sys/dev/clk/allwinner/ccu_a10.c index 0a14583a9f04..491cb4b28d3b 100644 --- a/sys/dev/clk/allwinner/ccu_a10.c +++ b/sys/dev/clk/allwinner/ccu_a10.c @@ -1,618 +1,618 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Kyle Evans * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include #include /* Non-exported resets */ /* Non-exported clocks */ #define CLK_PLL_CORE 2 #define CLK_AXI 3 #define CLK_AHB 4 #define CLK_APB0 5 #define CLK_APB1 6 #define CLK_PLL_VIDEO0 8 #define CLK_PLL_DDR 12 #define CLK_PLL_DDR_OTHER 13 #define CLK_PLL6 14 #define CLK_PLL_PERIPH 15 #define CLK_PLL_SATA 16 #define CLK_PLL_VIDEO1 17 /* Non-exported fixed clocks */ static struct aw_ccung_reset a10_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0xcc, 0) CCU_RESET(RST_USB_PHY1, 0xcc, 1) CCU_RESET(RST_USB_PHY2, 0xcc, 2) CCU_RESET(RST_GPS, 0xd0, 0) CCU_RESET(RST_DE_BE0, 0x104, 30) CCU_RESET(RST_DE_BE1, 0x108, 30) CCU_RESET(RST_DE_FE0, 0x10c, 30) CCU_RESET(RST_DE_FE1, 0x110, 30) CCU_RESET(RST_DE_MP, 0x114, 30) CCU_RESET(RST_TVE0, 0x118, 29) CCU_RESET(RST_TCON0, 0x118, 30) CCU_RESET(RST_TVE1, 0x11c, 29) CCU_RESET(RST_TCON1, 0x11c, 30) CCU_RESET(RST_CSI0, 0x134, 30) CCU_RESET(RST_CSI1, 0x138, 30) CCU_RESET(RST_VE, 0x13c, 0) CCU_RESET(RST_ACE, 0x148, 16) CCU_RESET(RST_LVDS, 0x14c, 0) CCU_RESET(RST_GPU, 0x154, 30) CCU_RESET(RST_HDMI_H, 0x170, 0) CCU_RESET(RST_HDMI_SYS, 0x170, 1) CCU_RESET(RST_HDMI_AUDIO_DMA, 0x170, 2) }; static struct aw_ccung_gate a10_ccu_gates[] = { CCU_GATE(CLK_HOSC, "hosc", "osc24M", 0x50, 0) CCU_GATE(CLK_AHB_OTG, "ahb-otg", "ahb", 0x60, 0) CCU_GATE(CLK_AHB_EHCI0, "ahb-ehci0", "ahb", 0x60, 1) CCU_GATE(CLK_AHB_OHCI0, "ahb-ohci0", "ahb", 0x60, 2) CCU_GATE(CLK_AHB_EHCI1, "ahb-ehci1", "ahb", 0x60, 3) CCU_GATE(CLK_AHB_OHCI1, "ahb-ohci1", "ahb", 0x60, 4) CCU_GATE(CLK_AHB_SS, "ahb-ss", "ahb", 0x60, 5) CCU_GATE(CLK_AHB_DMA, "ahb-dma", "ahb", 0x60, 6) CCU_GATE(CLK_AHB_BIST, "ahb-bist", "ahb", 0x60, 7) CCU_GATE(CLK_AHB_MMC0, "ahb-mmc0", "ahb", 0x60, 8) CCU_GATE(CLK_AHB_MMC1, "ahb-mmc1", "ahb", 0x60, 9) CCU_GATE(CLK_AHB_MMC2, "ahb-mmc2", "ahb", 0x60, 10) CCU_GATE(CLK_AHB_MMC3, "ahb-mmc3", "ahb", 
0x60, 11) CCU_GATE(CLK_AHB_MS, "ahb-ms", "ahb", 0x60, 12) CCU_GATE(CLK_AHB_NAND, "ahb-nand", "ahb", 0x60, 13) CCU_GATE(CLK_AHB_SDRAM, "ahb-sdram", "ahb", 0x60, 14) CCU_GATE(CLK_AHB_ACE, "ahb-ace", "ahb", 0x60, 16) CCU_GATE(CLK_AHB_EMAC, "ahb-emac", "ahb", 0x60, 17) CCU_GATE(CLK_AHB_TS, "ahb-ts", "ahb", 0x60, 18) CCU_GATE(CLK_AHB_SPI0, "ahb-spi0", "ahb", 0x60, 20) CCU_GATE(CLK_AHB_SPI1, "ahb-spi1", "ahb", 0x60, 21) CCU_GATE(CLK_AHB_SPI2, "ahb-spi2", "ahb", 0x60, 22) CCU_GATE(CLK_AHB_SPI3, "ahb-spi3", "ahb", 0x60, 23) CCU_GATE(CLK_AHB_SATA, "ahb-sata", "ahb", 0x60, 25) CCU_GATE(CLK_AHB_VE, "ahb-ve", "ahb", 0x64, 0) CCU_GATE(CLK_AHB_TVD, "ahb-tvd", "ahb", 0x64, 1) CCU_GATE(CLK_AHB_TVE0, "ahb-tve0", "ahb", 0x64, 2) CCU_GATE(CLK_AHB_TVE1, "ahb-tve1", "ahb", 0x64, 3) CCU_GATE(CLK_AHB_LCD0, "ahb-lcd0", "ahb", 0x64, 4) CCU_GATE(CLK_AHB_LCD1, "ahb-lcd1", "ahb", 0x64, 5) CCU_GATE(CLK_AHB_CSI0, "ahb-csi0", "ahb", 0x64, 8) CCU_GATE(CLK_AHB_CSI1, "ahb-csi1", "ahb", 0x64, 9) CCU_GATE(CLK_AHB_HDMI1, "ahb-hdmi1", "ahb", 0x64, 10) CCU_GATE(CLK_AHB_HDMI0, "ahb-hdmi0", "ahb", 0x64, 11) CCU_GATE(CLK_AHB_DE_BE0, "ahb-de_be0", "ahb", 0x64, 12) CCU_GATE(CLK_AHB_DE_BE1, "ahb-de_be1", "ahb", 0x64, 13) CCU_GATE(CLK_AHB_DE_FE0, "ahb-de_fe0", "ahb", 0x64, 14) CCU_GATE(CLK_AHB_DE_FE1, "ahb-de_fe1", "ahb", 0x64, 15) CCU_GATE(CLK_AHB_GMAC, "ahb-gmac", "ahb", 0x64, 17) CCU_GATE(CLK_AHB_MP, "ahb-mp", "ahb", 0x64, 18) CCU_GATE(CLK_AHB_GPU, "ahb-gpu", "ahb", 0x64, 20) CCU_GATE(CLK_APB0_CODEC, "apb0-codec", "apb0", 0x68, 0) CCU_GATE(CLK_APB0_SPDIF, "apb0-spdif", "apb0", 0x68, 1) CCU_GATE(CLK_APB0_AC97, "apb0-ac97", "apb0", 0x68, 2) CCU_GATE(CLK_APB0_I2S0, "apb0-i2s0", "apb0", 0x68, 3) CCU_GATE(CLK_APB0_I2S1, "apb0-i2s1", "apb0", 0x68, 4) CCU_GATE(CLK_APB0_PIO, "apb0-pi0", "apb0", 0x68, 5) CCU_GATE(CLK_APB0_IR0, "apb0-ir0", "apb0", 0x68, 6) CCU_GATE(CLK_APB0_IR1, "apb0-ir1", "apb0", 0x68, 7) CCU_GATE(CLK_APB0_I2S2, "apb0-i2s2", "apb0",0x68, 8) CCU_GATE(CLK_APB0_KEYPAD, "apb0-keypad", "apb0", 0x68, 10) 
CCU_GATE(CLK_APB1_I2C0, "apb1-i2c0", "apb1", 0x6c, 0) CCU_GATE(CLK_APB1_I2C1, "apb1-i2c1", "apb1",0x6c, 1) CCU_GATE(CLK_APB1_I2C2, "apb1-i2c2", "apb1",0x6c, 2) CCU_GATE(CLK_APB1_I2C3, "apb1-i2c3", "apb1",0x6c, 3) CCU_GATE(CLK_APB1_CAN, "apb1-can", "apb1",0x6c, 4) CCU_GATE(CLK_APB1_SCR, "apb1-scr", "apb1",0x6c, 5) CCU_GATE(CLK_APB1_PS20, "apb1-ps20", "apb1",0x6c, 6) CCU_GATE(CLK_APB1_PS21, "apb1-ps21", "apb1",0x6c, 7) CCU_GATE(CLK_APB1_I2C4, "apb1-i2c4", "apb1", 0x6c, 15) CCU_GATE(CLK_APB1_UART0, "apb1-uart0", "apb1",0x6c, 16) CCU_GATE(CLK_APB1_UART1, "apb1-uart1", "apb1",0x6c, 17) CCU_GATE(CLK_APB1_UART2, "apb1-uart2", "apb1",0x6c, 18) CCU_GATE(CLK_APB1_UART3, "apb1-uart3", "apb1",0x6c, 19) CCU_GATE(CLK_APB1_UART4, "apb1-uart4", "apb1",0x6c, 20) CCU_GATE(CLK_APB1_UART5, "apb1-uart5", "apb1",0x6c, 21) CCU_GATE(CLK_APB1_UART6, "apb1-uart6", "apb1",0x6c, 22) CCU_GATE(CLK_APB1_UART7, "apb1-uart7", "apb1",0x6c, 23) CCU_GATE(CLK_USB_OHCI0, "usb-ohci0", "ahb", 0xcc, 6) CCU_GATE(CLK_USB_OHCI1, "usb-ohci1", "ahb", 0xcc, 7) CCU_GATE(CLK_USB_PHY, "usb-phy", "ahb", 0xcc, 8) CCU_GATE(CLK_DRAM_VE, "dram-ve", "pll_ddr", 0x100, 0) CCU_GATE(CLK_DRAM_CSI0, "dram-csi0", "pll_ddr", 0x100, 1) CCU_GATE(CLK_DRAM_CSI1, "dram-csi1", "pll_ddr", 0x100, 2) CCU_GATE(CLK_DRAM_TS, "dram-ts", "pll_ddr", 0x100, 3) CCU_GATE(CLK_DRAM_TVD, "dram-tvd", "pll_ddr", 0x100, 4) CCU_GATE(CLK_DRAM_TVE0, "dram-tve0", "pll_ddr", 0x100, 5) CCU_GATE(CLK_DRAM_TVE1, "dram-tve1", "pll_ddr", 0x100, 6) CCU_GATE(CLK_DRAM_OUT, "dram-out", "pll_ddr", 0x100, 15) CCU_GATE(CLK_DRAM_DE_FE1, "dram-de_fe1", "pll_ddr", 0x100, 24) CCU_GATE(CLK_DRAM_DE_FE0, "dram-de_fe0", "pll_ddr", 0x100, 25) CCU_GATE(CLK_DRAM_DE_BE0, "dram-de_be0", "pll_ddr", 0x100, 26) CCU_GATE(CLK_DRAM_DE_BE1, "dram-de_be1", "pll_ddr", 0x100, 27) CCU_GATE(CLK_DRAM_MP, "dram-de_mp", "pll_ddr", 0x100, 28) CCU_GATE(CLK_DRAM_ACE, "dram-ace", "pll_ddr", 0x100, 29) }; static const char *pll_parents[] = {"osc24M"}; NKMP_CLK(pll_core_clk, CLK_PLL_CORE, /* id */ 
"pll_core", pll_parents, /* name, parents */ 0x00, /* offset */ 8, 5, 0, AW_CLK_FACTOR_ZERO_IS_ONE, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ FRAC_CLK(pll_video0_clk, CLK_PLL_VIDEO0, /* id */ "pll_video0", pll_parents, /* name, parents */ 0x10, /* offset */ 0, 7, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 31, 0, 0, /* gate, lock, lock retries */ AW_CLK_HAS_GATE, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 15, 14, /* mode sel, freq sel */ 27000000, 381000000); /* min freq, max freq */ static const char *pll_video0_2x_parents[] = {"pll_video0"}; FIXED_CLK(pll_video0_2x_clk, CLK_PLL_VIDEO0_2X, /* id */ "pll_video0-2x", pll_video0_2x_parents, /* name, parents */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ FRAC_CLK(pll_video1_clk, CLK_PLL_VIDEO1, /* id */ "pll_video1", pll_parents, /* name, parents */ 0x30, /* offset */ 0, 7, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 31, 0, 0, /* gate, lock, lock retries */ AW_CLK_HAS_GATE, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 15, 14, /* mode sel, freq sel */ 27000000, 381000000); /* min freq, max freq */ static const char *pll_video1_2x_parents[] = {"pll_video1"}; FIXED_CLK(pll_video1_2x_clk, CLK_PLL_VIDEO1_2X, /* id */ "pll_video1-2x", pll_video1_2x_parents, /* name, parents */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ static const char *cpu_parents[] = {"osc32k", "osc24M", "pll_core", "pll_periph"}; static const char *axi_parents[] = {"cpu"}; static const char *ahb_parents[] = {"axi", "pll_periph", "pll6"}; static const char *apb0_parents[] = {"ahb"}; static const char *apb1_parents[] = {"osc24M", "pll_periph", "osc32k"}; MUX_CLK(cpu_clk, CLK_CPU, /* id */ "cpu", cpu_parents, /* name, parents */ 0x54, 16, 2); /* offset, shift, width */ NM_CLK(axi_clk, CLK_AXI, /* id */ "axi", 
axi_parents, /* name, parents */ 0x54, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 2, 0, 0, /* m factor */ 0, 0, /* mux */ 0, /* gate */ 0); /* flags */ NM_CLK(ahb_clk, CLK_AHB, /* id */ "ahb", ahb_parents, /* name, parents */ 0x54, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* m factor */ 6, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ NM_CLK(apb0_clk, CLK_APB0, /* id */ "apb0", apb0_parents, /* name, parents */ 0x54, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO | AW_CLK_FACTOR_ZERO_IS_ONE, /* m factor */ 0, 0, /* mux */ 0, /* gate */ 0); /* flags */ NM_CLK(apb1_clk, CLK_APB1, /* id */ "apb1", apb1_parents, /* name, parents */ 0x58, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ NKMP_CLK(pll_ddr_other_clk, CLK_PLL_DDR_OTHER, /* id */ "pll_ddr_other", pll_parents, /* name, parents */ 0x20, /* offset */ 8, 5, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 2, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ NKMP_CLK(pll_ddr_clk, CLK_PLL_DDR, /* id */ "pll_ddr", pll_parents, /* name, parents */ 0x20, /* offset */ 8, 5, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ NKMP_CLK(pll6_clk, CLK_PLL6, /* id */ "pll6", pll_parents, /* name, parents */ 0x28, /* offset */ 8, 5, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char 
*pll6_parents[] = {"pll6"}; FIXED_CLK(pll_periph_clk, CLK_PLL_PERIPH, /* id */ "pll_periph", pll6_parents, /* name, parents */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ NKMP_CLK(pll_periph_sata_clk, CLK_PLL_SATA, /* id */ "pll_periph_sata", pll6_parents, /* name, parents */ 0x28, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 2, 0, 0, /* m factor */ 0, 0, 6, AW_CLK_FACTOR_FIXED, /* p factor (fake, 6) */ 14, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *mod_parents[] = {"osc24M", "pll_periph", "pll_ddr_other"}; NM_CLK(nand_clk, CLK_NAND, /* id */ "nand", mod_parents, /* name, parents */ 0x80, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(ms_clk, CLK_MS, /* id */ "ms", mod_parents, /* name, parents */ 0x84, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(mmc0_clk, CLK_MMC0, /* id */ "mmc0", mod_parents, /* name, parents */ 0x88, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc1_clk, CLK_MMC1, /* id */ "mmc1", mod_parents, /* name, parents */ 0x8c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc2_clk, CLK_MMC2, /* id */ "mmc2", mod_parents, /* name, parents */ 0x90, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc3_clk, CLK_MMC3, /* id */ 
"mmc3", mod_parents, /* name, parents */ 0x94, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT); /* flags */ NM_CLK(ts_clk, CLK_TS, /* id */ "ts", mod_parents, /* name, parents */ 0x94, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(ss_clk, CLK_SS, /* id */ "ss", mod_parents, /* name, parents */ 0x9c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(spi0_clk, CLK_SPI0, /* id */ "spi0", mod_parents, /* name, parents */ 0xa0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(spi1_clk, CLK_SPI1, /* id */ "spi1", mod_parents, /* name, parents */ 0xa4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(spi2_clk, CLK_SPI2, /* id */ "spi2", mod_parents, /* name, parents */ 0xa8, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ /* MISSING CLK_PATA */ NM_CLK(ir0_clk, CLK_IR0, /* id */ "ir0", mod_parents, /* name, parents */ 0xb0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(ir1_clk, CLK_IR1, /* id */ "ir1", mod_parents, /* name, parents */ 0xb4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | 
AW_CLK_HAS_GATE); /* flags */ /* MISSING CLK_I2S0, CLK_AC97, CLK_SPDIF */ static const char *keypad_parents[] = {"osc24M", "osc24M", "osc32k"}; NM_CLK(keypad_clk, CLK_KEYPAD, /* id */ "keypad", keypad_parents, /* name, parents */ 0xc4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *sata_parents[] = {"pll_periph_sata", "osc32k"}; NM_CLK(sata_clk, CLK_SATA, /* id */ "sata", sata_parents, /* name, parents */ 0xc8, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 24, 1, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(spi3_clk, CLK_SPI3, /* id */ "spi3", mod_parents, /* name, parents */ 0xd4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ /* MISSING CLK_I2S1, CLK_I2S2, DE Clocks */ static struct aw_ccung_clk a10_ccu_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_core_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_other_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll6_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph_sata_clk}, { .type = AW_CLK_NM, .clk.nm = &axi_clk}, { .type = AW_CLK_NM, .clk.nm = &ahb_clk}, { .type = AW_CLK_NM, .clk.nm = &apb0_clk}, { .type = AW_CLK_NM, .clk.nm = &apb1_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video0_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video1_clk}, { .type = AW_CLK_NM, .clk.nm = &nand_clk}, { .type = AW_CLK_NM, .clk.nm = &ms_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc3_clk}, { .type = AW_CLK_NM, .clk.nm = &ts_clk}, { .type = AW_CLK_NM, .clk.nm = &ss_clk}, { .type = AW_CLK_NM, 
.clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_NM, .clk.nm = &spi2_clk}, { .type = AW_CLK_NM, .clk.nm = &ir0_clk}, { .type = AW_CLK_NM, .clk.nm = &ir1_clk}, { .type = AW_CLK_NM, .clk.nm = &keypad_clk}, { .type = AW_CLK_NM, .clk.nm = &sata_clk}, { .type = AW_CLK_NM, .clk.nm = &spi3_clk}, { .type = AW_CLK_MUX, .clk.mux = &cpu_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video0_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video1_2x_clk}, }; static struct aw_clk_init a10_init_clks[] = { }; static struct ofw_compat_data compat_data[] = { #if defined(SOC_ALLWINNER_A10) { "allwinner,sun4i-a10-ccu", 1 }, #endif #if defined(SOC_ALLWINNER_A20) { "allwinner,sun7i-a20-ccu", 1 }, #endif { NULL, 0}, }; static int ccu_a10_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner A10/A20 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_a10_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = a10_ccu_resets; sc->nresets = nitems(a10_ccu_resets); sc->gates = a10_ccu_gates; sc->ngates = nitems(a10_ccu_gates); sc->clks = a10_ccu_clks; sc->nclks = nitems(a10_ccu_clks); sc->clk_init = a10_init_clks; sc->n_clk_init = nitems(a10_init_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a10ng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_a10_probe), DEVMETHOD(device_attach, ccu_a10_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a10ng, ccu_a10ng_driver, ccu_a10ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a10ng, simplebus, ccu_a10ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_a13.c b/sys/dev/clk/allwinner/ccu_a13.c index 2bddcd382040..76b3b803aec1 100644 --- a/sys/dev/clk/allwinner/ccu_a13.c 
+++ b/sys/dev/clk/allwinner/ccu_a13.c @@ -1,563 +1,563 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_PLL_CORE 2 #define CLK_PLL_AUDIO_BASE 3 #define CLK_PLL_AUDIO 4 #define CLK_PLL_AUDIO_2X 5 #define CLK_PLL_AUDIO_4X 6 #define CLK_PLL_AUDIO_8X 7 #define CLK_PLL_VIDEO0 8 #define CLK_PLL_VE 10 #define CLK_PLL_DDR_BASE 11 #define CLK_PLL_DDR 12 #define CLK_PLL_DDR_OTHER 13 #define CLK_PLL_PERIPH 14 #define CLK_PLL_VIDEO1 15 #define CLK_AXI 18 #define CLK_AHB 19 #define CLK_APB0 20 #define CLK_APB1 21 #define CLK_DRAM_AXI 22 #define CLK_TCON_CH1_SCLK 91 #define CLK_MBUS 99 static struct aw_ccung_reset a13_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0xcc, 0) CCU_RESET(RST_USB_PHY1, 0xcc, 1) CCU_RESET(RST_GPS, 0xd0, 30) CCU_RESET(RST_DE_BE, 0x104, 30) CCU_RESET(RST_DE_FE, 0x10c, 30) CCU_RESET(RST_TVE, 0x118, 29) CCU_RESET(RST_LCD, 0x118, 30) CCU_RESET(RST_CSI, 0x134, 30) CCU_RESET(RST_VE, 0x13c, 0) CCU_RESET(RST_GPU, 0x154, 30) CCU_RESET(RST_IEP, 0x160, 30) }; static struct aw_ccung_gate a13_ccu_gates[] = { CCU_GATE(CLK_HOSC, "hosc", "osc24M", 0x50, 0) CCU_GATE(CLK_DRAM_AXI, "axi-dram", "axi", 0x5c, 0) CCU_GATE(CLK_AHB_OTG, "ahb-otg", "ahb", 0x60, 0) CCU_GATE(CLK_AHB_EHCI, "ahb-ehci", "ahb", 0x60, 1) CCU_GATE(CLK_AHB_OHCI, "ahb-ohci", "ahb", 0x60, 2) CCU_GATE(CLK_AHB_SS, "ahb-ss", "ahb", 0x60, 5) CCU_GATE(CLK_AHB_DMA, "ahb-dma", "ahb", 0x60, 6) CCU_GATE(CLK_AHB_BIST, "ahb-bist", "ahb", 0x60, 7) CCU_GATE(CLK_AHB_MMC0, "ahb-mmc0", "ahb", 0x60, 8) CCU_GATE(CLK_AHB_MMC1, "ahb-mmc1", "ahb", 0x60, 9) CCU_GATE(CLK_AHB_MMC2, "ahb-mmc2", "ahb", 0x60, 10) CCU_GATE(CLK_AHB_NAND, "ahb-nand", "ahb", 0x60, 13) CCU_GATE(CLK_AHB_SDRAM, "ahb-sdram", "ahb", 0x60, 14) CCU_GATE(CLK_AHB_SPI0, "ahb-spi0", "ahb", 0x60, 20) CCU_GATE(CLK_AHB_SPI1, "ahb-spi1", "ahb", 0x60, 21) CCU_GATE(CLK_AHB_SPI2, "ahb-spi2", "ahb", 0x60, 22) CCU_GATE(CLK_AHB_GPS, "ahb-gps", "ahb", 0x60, 26) 
CCU_GATE(CLK_AHB_HSTIMER, "ahb-hstimer", "ahb", 0x60, 28) CCU_GATE(CLK_AHB_VE, "ahb-ve", "ahb", 0x64, 0) CCU_GATE(CLK_AHB_LCD, "ahb-lcd", "ahb", 0x64, 4) CCU_GATE(CLK_AHB_CSI, "ahb-csi", "ahb", 0x64, 8) CCU_GATE(CLK_AHB_DE_BE, "ahb-de-be", "ahb", 0x64, 12) CCU_GATE(CLK_AHB_DE_FE, "ahb-de-fe", "ahb", 0x64, 14) CCU_GATE(CLK_AHB_IEP, "ahb-iep", "ahb", 0x64, 19) CCU_GATE(CLK_AHB_GPU, "ahb-gpu", "ahb", 0x64, 20) CCU_GATE(CLK_APB0_CODEC, "apb0-codec", "apb0", 0x68, 0) CCU_GATE(CLK_APB0_PIO, "apb0-pio", "apb0", 0x68, 5) CCU_GATE(CLK_APB0_IR, "apb0-ir", "apb0", 0x68, 6) CCU_GATE(CLK_APB1_I2C0, "apb1-i2c0", "apb1", 0x6c, 0) CCU_GATE(CLK_APB1_I2C1, "apb1-i2c1", "apb1", 0x6c, 1) CCU_GATE(CLK_APB1_I2C2, "apb1-i2c2", "apb1", 0x6c, 2) CCU_GATE(CLK_APB1_UART1, "apb1-uart1", "apb1", 0x6c, 17) CCU_GATE(CLK_APB1_UART3, "apb1-uart3", "apb1", 0x6c, 19) CCU_GATE(CLK_DRAM_VE, "dram-ve", "pll-ddr", 0x100, 0) CCU_GATE(CLK_DRAM_CSI, "dram-csi", "pll-ddr", 0x100, 1) CCU_GATE(CLK_DRAM_DE_FE, "dram-de-fe", "pll-ddr", 0x100, 25) CCU_GATE(CLK_DRAM_DE_BE, "dram-de-be", "pll-ddr", 0x100, 26) CCU_GATE(CLK_DRAM_ACE, "dram-ace", "pll-ddr", 0x100, 29) CCU_GATE(CLK_DRAM_IEP, "dram-iep", "pll-ddr", 0x100, 31) CCU_GATE(CLK_CODEC, "codec", "pll-audio", 0x140, 31) CCU_GATE(CLK_AVS, "avs", "hosc", 0x144, 31) }; static const char *pll_parents[] = {"hosc"}; static struct aw_clk_nkmp_def pll_core = { .clkdef = { .id = CLK_PLL_CORE, .name = "pll-core", .parent_names = pll_parents, .parent_cnt = nitems(pll_parents), }, .offset = 0x00, .n = {.shift = 8, .width = 5}, .k = {.shift = 4, .width = 2}, .m = {.shift = 0, .width = 2}, .p = {.shift = 16, .width = 2}, .gate_shift = 31, .flags = AW_CLK_HAS_GATE, }; /* * We only implement pll-audio for now * For pll-audio-2/4/8 x we need a way to change the frequency * of the parent clocks */ static struct aw_clk_nkmp_def pll_audio = { .clkdef = { .id = CLK_PLL_AUDIO, .name = "pll-audio", .parent_names = pll_parents, .parent_cnt = nitems(pll_parents), }, .offset = 0x08, .n 
= {.shift = 8, .width = 7}, .k = {.value = 1, .flags = AW_CLK_FACTOR_FIXED}, .m = {.shift = 0, .width = 5}, .p = {.shift = 26, .width = 4}, .gate_shift = 31, .flags = AW_CLK_HAS_GATE, }; /* Missing PLL3-Video */ /* Missing PLL4-VE */ static struct aw_clk_nkmp_def pll_ddr_base = { .clkdef = { .id = CLK_PLL_DDR_BASE, .name = "pll-ddr-base", .parent_names = pll_parents, .parent_cnt = nitems(pll_parents), }, .offset = 0x20, .n = {.shift = 8, .width = 5}, .k = {.shift = 4, .width = 2}, .m = {.value = 1, .flags = AW_CLK_FACTOR_FIXED}, .p = {.value = 1, .flags = AW_CLK_FACTOR_FIXED}, .gate_shift = 31, .flags = AW_CLK_HAS_GATE, }; static const char *pll_ddr_parents[] = {"pll-ddr-base"}; static struct clk_div_def pll_ddr = { .clkdef = { .id = CLK_PLL_DDR, .name = "pll-ddr", .parent_names = pll_ddr_parents, .parent_cnt = nitems(pll_ddr_parents), }, .offset = 0x20, .i_shift = 0, .i_width = 2, }; static const char *pll_ddr_other_parents[] = {"pll-ddr-base"}; static struct clk_div_def pll_ddr_other = { .clkdef = { .id = CLK_PLL_DDR_OTHER, .name = "pll-ddr-other", .parent_names = pll_ddr_other_parents, .parent_cnt = nitems(pll_ddr_other_parents), }, .offset = 0x20, .i_shift = 16, .i_width = 2, }; static struct aw_clk_nkmp_def pll_periph = { .clkdef = { .id = CLK_PLL_PERIPH, .name = "pll-periph", .parent_names = pll_parents, .parent_cnt = nitems(pll_parents), }, .offset = 0x28, .n = {.shift = 8, .width = 5}, .k = {.shift = 4, .width = 2}, .m = {.shift = 0, .width = 2}, .p = {.value = 2, .flags = AW_CLK_FACTOR_FIXED}, .gate_shift = 31, .flags = AW_CLK_HAS_GATE, }; /* Missing PLL7-VIDEO1 */ static const char *cpu_parents[] = {"osc32k", "hosc", "pll-core", "pll-periph"}; static struct aw_clk_prediv_mux_def cpu_clk = { .clkdef = { .id = CLK_CPU, .name = "cpu", .parent_names = cpu_parents, .parent_cnt = nitems(cpu_parents), }, .offset = 0x54, .mux_shift = 16, .mux_width = 2, .prediv = { .value = 6, .flags = AW_CLK_FACTOR_FIXED, .cond_shift = 16, .cond_width = 2, .cond_value = 3, }, }; 
static const char *axi_parents[] = {"cpu"}; static struct clk_div_def axi_clk = { .clkdef = { .id = CLK_AXI, .name = "axi", .parent_names = axi_parents, .parent_cnt = nitems(axi_parents), }, .offset = 0x50, .i_shift = 0, .i_width = 2, }; static const char *ahb_parents[] = {"axi", "cpu", "pll-periph"}; static struct aw_clk_prediv_mux_def ahb_clk = { .clkdef = { .id = CLK_AHB, .name = "ahb", .parent_names = ahb_parents, .parent_cnt = nitems(ahb_parents), }, .offset = 0x54, .mux_shift = 6, .mux_width = 2, .div = { .shift = 4, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO }, .prediv = { .value = 2, .flags = AW_CLK_FACTOR_FIXED, .cond_shift = 6, .cond_width = 2, .cond_value = 2, }, }; static const char *apb0_parents[] = {"ahb"}; static struct clk_div_table apb0_div_table[] = { { .value = 0, .divider = 2, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 4, }, { .value = 3, .divider = 8, }, { }, }; static struct clk_div_def apb0_clk = { .clkdef = { .id = CLK_APB0, .name = "apb0", .parent_names = apb0_parents, .parent_cnt = nitems(apb0_parents), }, .offset = 0x54, .i_shift = 8, .i_width = 2, .div_flags = CLK_DIV_WITH_TABLE, .div_table = apb0_div_table, }; static const char *apb1_parents[] = {"hosc", "pll-periph", "osc32k"}; static struct aw_clk_nm_def apb1_clk = { .clkdef = { .id = CLK_APB1, .name = "apb1", .parent_names = apb1_parents, .parent_cnt = nitems(apb1_parents), }, .offset = 0x58, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 5}, .mux_shift = 24, .mux_width = 2, .flags = AW_CLK_HAS_MUX, }; static const char *mod_parents[] = {"hosc", "pll-periph", "pll-ddr-other"}; static struct aw_clk_nm_def nand_clk = { .clkdef = { .id = CLK_NAND, .name = "nand", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0x80, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = 
AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def mmc0_clk = { .clkdef = { .id = CLK_MMC0, .name = "mmc0", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0x88, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def mmc1_clk = { .clkdef = { .id = CLK_MMC1, .name = "mmc1", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0x8C, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def mmc2_clk = { .clkdef = { .id = CLK_MMC2, .name = "mmc2", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0x90, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def ss_clk = { .clkdef = { .id = CLK_SS, .name = "ss", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0x9C, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def spi0_clk = { .clkdef = { .id = CLK_SPI0, .name = "spi0", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0xA0, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def 
spi1_clk = { .clkdef = { .id = CLK_SPI1, .name = "spi1", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0xA4, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def spi2_clk = { .clkdef = { .id = CLK_SPI2, .name = "spi2", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0xA8, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; static struct aw_clk_nm_def ir_clk = { .clkdef = { .id = CLK_IR, .name = "ir", .parent_names = mod_parents, .parent_cnt = nitems(mod_parents), }, .offset = 0xB0, .n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 0, .width = 4}, .mux_shift = 24, .mux_width = 2, .gate_shift = 31, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT }; /* Missing DE-BE clock */ /* Missing DE-FE clock */ /* Missing LCD CH1 clock */ /* Missing CSI clock */ /* Missing VE clock */ /* Clocks list */ static struct aw_ccung_clk a13_ccu_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_core}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_audio}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_base}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph}, { .type = AW_CLK_NM, .clk.nm = &apb1_clk}, { .type = AW_CLK_NM, .clk.nm = &nand_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &ss_clk}, { .type = AW_CLK_NM, .clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_NM, .clk.nm = &spi2_clk}, { .type = AW_CLK_NM, .clk.nm = &ir_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &cpu_clk}, { .type = 
AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb_clk}, { .type = AW_CLK_DIV, .clk.div = &pll_ddr}, { .type = AW_CLK_DIV, .clk.div = &pll_ddr_other}, { .type = AW_CLK_DIV, .clk.div = &axi_clk}, { .type = AW_CLK_DIV, .clk.div = &apb0_clk}, }; static int ccu_a13_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun5i-a13-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner A13 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_a13_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = a13_ccu_resets; sc->nresets = nitems(a13_ccu_resets); sc->gates = a13_ccu_gates; sc->ngates = nitems(a13_ccu_gates); sc->clks = a13_ccu_clks; sc->nclks = nitems(a13_ccu_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a13ng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_a13_probe), DEVMETHOD(device_attach, ccu_a13_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a13ng, ccu_a13ng_driver, ccu_a13ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a13ng, simplebus, ccu_a13ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_a31.c b/sys/dev/clk/allwinner/ccu_a31.c index d6db0ab03fe7..f2cbd9196682 100644 --- a/sys/dev/clk/allwinner/ccu_a31.c +++ b/sys/dev/clk/allwinner/ccu_a31.c @@ -1,973 +1,973 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_PLL_CPU 0 #define CLK_PLL_AUDIO_BASE 1 #define CLK_PLL_AUDIO 2 #define CLK_PLL_AUDIO_2X 3 #define CLK_PLL_AUDIO_4X 4 #define CLK_PLL_AUDIO_8X 5 #define CLK_PLL_VIDEO0 6 #define CLK_PLL_VIDEO0_2X 7 #define CLK_PLL_VE 8 #define CLK_PLL_DDR 9 #define CLK_PLL_PERIPH_2X 11 #define CLK_PLL_VIDEO1 12 #define CLK_PLL_VIDEO1_2X 13 #define CLK_PLL_GPU 14 #define CLK_PLL_MIPI 15 #define CLK_PLL9 16 #define CLK_PLL10 17 #define CLK_AXI 19 #define CLK_AHB1 20 #define CLK_APB1 21 #define CLK_APB2 22 #define CLK_MDFS 107 #define CLK_SDRAM0 108 #define CLK_SDRAM1 109 #define CLK_MBUS0 141 #define CLK_MBUS1 142 static struct aw_ccung_reset a31_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0xcc, 0) CCU_RESET(RST_USB_PHY1, 0xcc, 1) CCU_RESET(RST_USB_PHY2, 0xcc, 2) CCU_RESET(RST_AHB1_MIPI_DSI, 0x2c0, 1) CCU_RESET(RST_AHB1_SS, 0x2c0, 5) CCU_RESET(RST_AHB1_DMA, 0x2c0, 6) CCU_RESET(RST_AHB1_MMC0, 0x2c0, 8) CCU_RESET(RST_AHB1_MMC1, 0x2c0, 9) CCU_RESET(RST_AHB1_MMC2, 0x2c0, 10) CCU_RESET(RST_AHB1_MMC3, 0x2c0, 11) CCU_RESET(RST_AHB1_NAND1, 0x2c0, 12) 
CCU_RESET(RST_AHB1_NAND0, 0x2c0, 13) CCU_RESET(RST_AHB1_SDRAM, 0x2c0, 14) CCU_RESET(RST_AHB1_EMAC, 0x2c0, 17) CCU_RESET(RST_AHB1_TS, 0x2c0, 18) CCU_RESET(RST_AHB1_HSTIMER, 0x2c0, 19) CCU_RESET(RST_AHB1_SPI0, 0x2c0, 20) CCU_RESET(RST_AHB1_SPI1, 0x2c0, 21) CCU_RESET(RST_AHB1_SPI2, 0x2c0, 22) CCU_RESET(RST_AHB1_SPI3, 0x2c0, 23) CCU_RESET(RST_AHB1_OTG, 0x2c0, 24) CCU_RESET(RST_AHB1_EHCI0, 0x2c0, 26) CCU_RESET(RST_AHB1_EHCI1, 0x2c0, 27) CCU_RESET(RST_AHB1_OHCI0, 0x2c0, 29) CCU_RESET(RST_AHB1_OHCI1, 0x2c0, 30) CCU_RESET(RST_AHB1_OHCI2, 0x2c0, 31) CCU_RESET(RST_AHB1_VE, 0x2c4, 0) CCU_RESET(RST_AHB1_LCD0, 0x2c4, 4) CCU_RESET(RST_AHB1_LCD1, 0x2c4, 5) CCU_RESET(RST_AHB1_CSI, 0x2c4, 8) CCU_RESET(RST_AHB1_HDMI, 0x2c4, 11) CCU_RESET(RST_AHB1_BE0, 0x2c4, 12) CCU_RESET(RST_AHB1_BE1, 0x2c4, 13) CCU_RESET(RST_AHB1_FE0, 0x2c4, 14) CCU_RESET(RST_AHB1_FE1, 0x2c4, 15) CCU_RESET(RST_AHB1_MP, 0x2c4, 18) CCU_RESET(RST_AHB1_GPU, 0x2c4, 20) CCU_RESET(RST_AHB1_DEU0, 0x2c4, 23) CCU_RESET(RST_AHB1_DEU1, 0x2c4, 24) CCU_RESET(RST_AHB1_DRC0, 0x2c4, 25) CCU_RESET(RST_AHB1_DRC1, 0x2c4, 26) CCU_RESET(RST_AHB1_LVDS, 0x2c8, 0) CCU_RESET(RST_APB1_CODEC, 0x2d0, 0) CCU_RESET(RST_APB1_SPDIF, 0x2d0, 1) CCU_RESET(RST_APB1_DIGITAL_MIC, 0x2d0, 4) CCU_RESET(RST_APB1_DAUDIO0, 0x2d0, 12) CCU_RESET(RST_APB1_DAUDIO1, 0x2d0, 13) CCU_RESET(RST_APB2_I2C0, 0x2d8, 0) CCU_RESET(RST_APB2_I2C1, 0x2d8, 1) CCU_RESET(RST_APB2_I2C2, 0x2d8, 2) CCU_RESET(RST_APB2_I2C3, 0x2d8, 3) CCU_RESET(RST_APB2_UART0, 0x2d8, 16) CCU_RESET(RST_APB2_UART1, 0x2d8, 17) CCU_RESET(RST_APB2_UART2, 0x2d8, 18) CCU_RESET(RST_APB2_UART3, 0x2d8, 19) CCU_RESET(RST_APB2_UART4, 0x2d8, 20) CCU_RESET(RST_APB2_UART5, 0x2d8, 21) }; static struct aw_ccung_gate a31_ccu_gates[] = { CCU_GATE(CLK_AHB1_MIPIDSI, "ahb1-mipidsi", "ahb1", 0x60, 1) CCU_GATE(CLK_AHB1_SS, "ahb1-ss", "ahb1", 0x60, 5) CCU_GATE(CLK_AHB1_DMA, "ahb1-dma", "ahb1", 0x60, 6) CCU_GATE(CLK_AHB1_MMC0, "ahb1-mmc0", "ahb1", 0x60, 8) CCU_GATE(CLK_AHB1_MMC1, "ahb1-mmc1", "ahb1", 0x60, 9) 
CCU_GATE(CLK_AHB1_MMC2, "ahb1-mmc2", "ahb1", 0x60, 10) CCU_GATE(CLK_AHB1_MMC3, "ahb1-mmc3", "ahb1", 0x60, 11) CCU_GATE(CLK_AHB1_NAND1, "ahb1-nand1", "ahb1", 0x60, 12) CCU_GATE(CLK_AHB1_NAND0, "ahb1-nand0", "ahb1", 0x60, 13) CCU_GATE(CLK_AHB1_SDRAM, "ahb1-sdram", "ahb1", 0x60, 14) CCU_GATE(CLK_AHB1_EMAC, "ahb1-emac", "ahb1", 0x60, 17) CCU_GATE(CLK_AHB1_TS, "ahb1-ts", "ahb1", 0x60, 18) CCU_GATE(CLK_AHB1_HSTIMER, "ahb1-hstimer", "ahb1", 0x60, 19) CCU_GATE(CLK_AHB1_SPI0, "ahb1-spi0", "ahb1", 0x60, 20) CCU_GATE(CLK_AHB1_SPI1, "ahb1-spi1", "ahb1", 0x60, 21) CCU_GATE(CLK_AHB1_SPI2, "ahb1-spi2", "ahb1", 0x60, 22) CCU_GATE(CLK_AHB1_SPI3, "ahb1-spi3", "ahb1", 0x60, 23) CCU_GATE(CLK_AHB1_OTG, "ahb1-otg", "ahb1", 0x60, 24) CCU_GATE(CLK_AHB1_EHCI0, "ahb1-ehci0", "ahb1", 0x60, 26) CCU_GATE(CLK_AHB1_EHCI1, "ahb1-ehci1", "ahb1", 0x60, 27) CCU_GATE(CLK_AHB1_OHCI0, "ahb1-ohci0", "ahb1", 0x60, 29) CCU_GATE(CLK_AHB1_OHCI1, "ahb1-ohci1", "ahb1", 0x60, 30) CCU_GATE(CLK_AHB1_OHCI2, "ahb1-ohci2", "ahb1", 0x60, 31) CCU_GATE(CLK_AHB1_VE, "ahb1-ve", "ahb1", 0x64, 0) CCU_GATE(CLK_AHB1_LCD0, "ahb1-lcd0", "ahb1", 0x64, 4) CCU_GATE(CLK_AHB1_LCD1, "ahb1-lcd1", "ahb1", 0x64, 5) CCU_GATE(CLK_AHB1_CSI, "ahb1-csi", "ahb1", 0x64, 8) CCU_GATE(CLK_AHB1_HDMI, "ahb1-hdmi", "ahb1", 0x64, 11) CCU_GATE(CLK_AHB1_BE0, "ahb1-be0", "ahb1", 0x64, 12) CCU_GATE(CLK_AHB1_BE1, "ahb1-be1", "ahb1", 0x64, 13) CCU_GATE(CLK_AHB1_FE0, "ahb1-fe0", "ahb1", 0x64, 14) CCU_GATE(CLK_AHB1_FE1, "ahb1-fe1", "ahb1", 0x64, 15) CCU_GATE(CLK_AHB1_MP, "ahb1-mp", "ahb1", 0x64, 18) CCU_GATE(CLK_AHB1_GPU, "ahb1-gpu", "ahb1", 0x64, 20) CCU_GATE(CLK_AHB1_DEU0, "ahb1-deu0", "ahb1", 0x64, 23) CCU_GATE(CLK_AHB1_DEU1, "ahb1-deu1", "ahb1", 0x64, 24) CCU_GATE(CLK_AHB1_DRC0, "ahb1-drc0", "ahb1", 0x64, 25) CCU_GATE(CLK_AHB1_DRC1, "ahb1-drc1", "ahb1", 0x64, 26) CCU_GATE(CLK_APB1_CODEC, "apb1-codec", "apb1", 0x68, 0) CCU_GATE(CLK_APB1_SPDIF, "apb1-spdif", "apb1", 0x68, 1) CCU_GATE(CLK_APB1_DIGITAL_MIC, "apb1-digital-mic", "apb1", 0x68, 4) 
CCU_GATE(CLK_APB1_PIO, "apb1-pio", "apb1", 0x68, 5) CCU_GATE(CLK_APB1_DAUDIO0, "apb1-daudio0", "apb1", 0x68, 12) CCU_GATE(CLK_APB1_DAUDIO1, "apb1-daudio1", "apb1", 0x68, 13) CCU_GATE(CLK_APB2_I2C0, "apb2-i2c0", "apb2", 0x6c, 0) CCU_GATE(CLK_APB2_I2C1, "apb2-i2c1", "apb2", 0x6c, 1) CCU_GATE(CLK_APB2_I2C2, "apb2-i2c2", "apb2", 0x6c, 2) CCU_GATE(CLK_APB2_I2C3, "apb2-i2c3", "apb2", 0x6c, 3) CCU_GATE(CLK_APB2_UART0, "apb2-uart0", "apb2", 0x6c, 16) CCU_GATE(CLK_APB2_UART1, "apb2-uart1", "apb2", 0x6c, 17) CCU_GATE(CLK_APB2_UART2, "apb2-uart2", "apb2", 0x6c, 18) CCU_GATE(CLK_APB2_UART3, "apb2-uart3", "apb2", 0x6c, 19) CCU_GATE(CLK_APB2_UART4, "apb2-uart4", "apb2", 0x6c, 20) CCU_GATE(CLK_APB2_UART5, "apb2-uart5", "apb2", 0x6c, 21) CCU_GATE(CLK_DAUDIO0, "daudio0", "daudio0mux", 0xb0, 31) CCU_GATE(CLK_DAUDIO1, "daudio1", "daudio1mux", 0xb4, 31) CCU_GATE(CLK_USB_PHY0, "usb-phy0", "osc24M", 0xcc, 8) CCU_GATE(CLK_USB_PHY1, "usb-phy1", "osc24M", 0xcc, 9) CCU_GATE(CLK_USB_PHY2, "usb-phy2", "osc24M", 0xcc, 10) CCU_GATE(CLK_USB_OHCI0, "usb-ohci0", "osc24M", 0xcc, 16) CCU_GATE(CLK_USB_OHCI1, "usb-ohci1", "osc24M", 0xcc, 17) CCU_GATE(CLK_USB_OHCI2, "usb-ohci2", "osc24M", 0xcc, 18) CCU_GATE(CLK_DRAM_VE, "dram-ve", "mdfs", 0x100, 0) CCU_GATE(CLK_DRAM_CSI_ISP, "dram-csi_isp", "mdfs", 0x100, 1) CCU_GATE(CLK_DRAM_TS, "dram-ts", "mdfs", 0x100, 3) CCU_GATE(CLK_DRAM_DRC0, "dram-drc0", "mdfs", 0x100, 16) CCU_GATE(CLK_DRAM_DRC1, "dram-drc1", "mdfs", 0x100, 17) CCU_GATE(CLK_DRAM_DEU0, "dram-deu0", "mdfs", 0x100, 18) CCU_GATE(CLK_DRAM_DEU1, "dram-deu1", "mdfs", 0x100, 19) CCU_GATE(CLK_DRAM_FE0, "dram-fe0", "mdfs", 0x100, 24) CCU_GATE(CLK_DRAM_FE1, "dram-fe1", "mdfs", 0x100, 25) CCU_GATE(CLK_DRAM_BE0, "dram-be0", "mdfs", 0x100, 26) CCU_GATE(CLK_DRAM_BE1, "dram-be1", "mdfs", 0x100, 27) CCU_GATE(CLK_DRAM_MP, "dram-mp", "mdfs", 0x100, 28) CCU_GATE(CLK_CODEC, "codec", "pll_audio", 0x140, 31) CCU_GATE(CLK_AVS, "avs", "pll_audio", 0x144, 31) CCU_GATE(CLK_DIGITAL_MIC, "digital-mic", "pll_audio", 0x148, 
31)	/* tail of the CCU_GATE entry started on the previous line */
	CCU_GATE(CLK_HDMI_DDC, "hdmi-ddc", "osc24M", 0x150, 30)
	CCU_GATE(CLK_PS, "ps", "lcd1_ch1", 0x154, 31)
};

/* Every PLL on the A31 is fed from the 24MHz main oscillator. */
static const char *pll_parents[] = {"osc24M"};

/* PLL_CPU: N*K/M of osc24M, gated, with a hardware lock bit. */
NKMP_CLK(pll_cpu_clk, CLK_PLL_CPU,		/* id */
    "pll_cpu", pll_parents,			/* name, parents */
    0x00,					/* offset */
    8, 5, 0, 0,					/* n factor */
    4, 2, 0, 0,					/* k factor */
    0, 2, 0, 0,					/* m factor */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* p factor (fake) */
    31,						/* gate */
    28, 1000,					/* lock */
    AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK |
    AW_CLK_SCALE_CHANGE);			/* flags */

/* PLL_AUDIO: N/M with a post-divider (p); k is a fixed x1 placeholder. */
NKMP_CLK(pll_audio_clk, CLK_PLL_AUDIO,		/* id */
    "pll_audio", pll_parents,			/* name, parents */
    0x08,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* k factor (fake) */
    0, 4, 1, 0,					/* m factor */
    16, 3, 1, 0,				/* p factor */
    31,						/* gate */
    28, 1000,					/* lock */
    AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK);		/* flags */

/* Fixed multiples of PLL_AUDIO exported for the audio blocks. */
static const char *pll_audio_mult_parents[] = {"pll_audio"};
FIXED_CLK(pll_audio_2x_clk, CLK_PLL_AUDIO_2X,	/* id */
    "pll_audio-2x",				/* name */
    pll_audio_mult_parents,			/* parent */
    0,						/* freq */
    2,						/* mult */
    1,						/* div */
    0);						/* flags */
FIXED_CLK(pll_audio_4x_clk, CLK_PLL_AUDIO_4X,	/* id */
    "pll_audio-4x",				/* name */
    pll_audio_mult_parents,			/* parent */
    0,						/* freq */
    4,						/* mult */
    1,						/* div */
    0);						/* flags */
FIXED_CLK(pll_audio_8x_clk, CLK_PLL_AUDIO_8X,	/* id */
    "pll_audio-8x",				/* name */
    pll_audio_mult_parents,			/* parent */
    0,						/* freq */
    8,						/* mult */
    1,						/* div */
    0);						/* flags */

/* Fractional video/VE/GPU PLLs share the same register layout. */
FRAC_CLK(pll_video0_clk, CLK_PLL_VIDEO0,	/* id */
    "pll_video0", pll_parents,			/* name, parents */
    0x10,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

static const char *pll_video0_2x_parents[] = {"pll_video0"};
FIXED_CLK(pll_video0_2x_clk, CLK_PLL_VIDEO0_2X,	/* id */
    "pll_video0-2x",				/* name */
    pll_video0_2x_parents,			/* parent */
    0,						/* freq */
    2,						/* mult */
    1,						/* div */
    0);						/* flags */

FRAC_CLK(pll_ve_clk, CLK_PLL_VE,		/* id */
    "pll_ve", pll_parents,			/* name, parents */
    0x18,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

/* PLL_DDR needs the SDR update bit (20) toggled after reprogramming. */
NKMP_CLK_WITH_UPDATE(pll_ddr_clk, CLK_PLL_DDR,	/* id */
    "pll_ddr", pll_parents,			/* name, parents */
    0x20,					/* offset */
    8, 5, 0, 0,					/* n factor */
    4, 2, 0, 0,					/* k factor */
    0, 2, 0, 0,					/* m factor */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* p factor (fake) */
    31,						/* gate */
    28, 1000,					/* lock */
    20,						/* update */
    AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK);		/* flags */

NKMP_CLK(pll_periph_clk, CLK_PLL_PERIPH,	/* id */
    "pll_periph", pll_parents,			/* name, parents */
    0x28,					/* offset */
    8, 4, 0, 0,					/* n factor */
    5, 2, 1, 0,					/* k factor */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* m factor (fake) */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* p factor (fake) */
    31,						/* gate */
    28, 1000,					/* lock */
    AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK);		/* flags */

static const char *pll_periph_2x_parents[] = {"pll_periph"};
FIXED_CLK(pll_periph_2x_clk, CLK_PLL_PERIPH_2X,	/* id */
    "pll_periph-2x",				/* name */
    pll_periph_2x_parents,			/* parent */
    0,						/* freq */
    2,						/* mult */
    1,						/* div */
    0);						/* flags */

FRAC_CLK(pll_video1_clk, CLK_PLL_VIDEO1,	/* id */
    "pll_video1", pll_parents,			/* name, parents */
    0x30,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

static const char *pll_video1_2x_parents[] = {"pll_video1"};
FIXED_CLK(pll_video1_2x_clk, CLK_PLL_VIDEO1_2X,	/* id */
    "pll_video1-2x",				/* name */
    pll_video1_2x_parents,			/* parent */
    0,						/* freq */
    2,						/* mult */
    1,						/* div */
    0);						/* flags */

FRAC_CLK(pll_gpu_clk, CLK_PLL_GPU,		/* id */
    "pll_gpu", pll_parents,			/* name, parents */
    0x38,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

/* PLL_MIPI is the only PLL not fed from osc24M. */
static const char *pll_mipi_parents[] = {"pll_video0", "pll_video1"};
NKMP_CLK(pll_mipi_clk, CLK_PLL_MIPI,		/* id */
    "pll_mipi", pll_mipi_parents,		/* name, parents */
    0x40,					/* offset */
    8, 4, 0, 0,					/* n factor */
    4, 2, 1, 0,					/* k factor */
    0, 2, 0, 0,					/* m factor (fake) */
    0, 0, 1, AW_CLK_FACTOR_FIXED,		/* p factor (fake) */
    31,						/* gate */
    28, 1000,					/* lock */
    AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK);		/* flags */

FRAC_CLK(pll9_clk, CLK_PLL9,			/* id */
    "pll9", pll_parents,			/* name, parents */
    0x44,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

FRAC_CLK(pll10_clk, CLK_PLL10,			/* id */
    "pll10", pll_parents,			/* name, parents */
    0x48,					/* offset */
    8, 7, 0, 0,					/* n factor */
    0, 4, 0, 0,					/* m factor */
    31, 28, 1000,				/* gate, lock, lock retries */
    AW_CLK_HAS_LOCK,				/* flags */
    270000000, 297000000,			/* freq0, freq1 */
    24, 25,					/* mode sel, freq sel */
    30000000, 600000000);			/* min freq, max freq */

/* AXI divider: encoded values 4..7 all mean divide-by-4. */
static struct clk_div_table axi_div_table[] = {
	{ .value = 0, .divider = 1, },
	{ .value = 1, .divider = 2, },
	{ .value = 2, .divider = 3, },
	{ .value = 3, .divider = 4, },
	{ .value = 4, .divider = 4, },
	{ .value = 5, .divider = 4, },
	{ .value = 6, .divider = 4, },
	{ .value = 7, .divider = 4, },
	{ },
};
static const char *axi_parents[] = {"cpu"};
DIV_CLK(axi_clk, CLK_AXI,			/* id */
    "axi", axi_parents,				/* name, parents */
    0x50,					/* offset */
    0, 2,					/* shift, mask */
    0, axi_div_table);				/* flags, div table */

/* Mux values 2 and 3 both select pll_cpu. */
static const char *cpu_parents[] = {"osc32k", "osc24M", "pll_cpu",
    "pll_cpu"};
/*
 * CPU mux and the AHB1/APB1/APB2 bus clocks, followed by the first
 * module clocks (NAND0/NAND1, MMC0).  Offsets are CCU-relative.
 */
MUX_CLK(cpu_clk, CLK_CPU,			/* id */
    "cpu", cpu_parents,				/* name, parents */
    0x50, 16, 2);				/* offset, shift, width */

static const char *ahb1_parents[] = {"osc32k", "osc24M", "axi",
    "pll_periph"};
/* The /1../8 pre-divider only applies when the mux (bits 12-13) == 3. */
PREDIV_CLK(ahb1_clk, CLK_AHB1,			/* id */
    "ahb1", ahb1_parents,			/* name, parents */
    0x54,					/* offset */
    12, 2,					/* mux */
    4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO,	/* div */
    6, 2, 0, AW_CLK_FACTOR_HAS_COND,		/* prediv */
    12, 2, 3);					/* prediv condition */

static const char *apb1_parents[] = {"ahb1"};
/* APB1 divider encoding: values 0 and 1 both mean divide-by-2. */
static struct clk_div_table apb1_div_table[] = {
	{ .value = 0, .divider = 2, },
	{ .value = 1, .divider = 2, },
	{ .value = 2, .divider = 4, },
	{ .value = 3, .divider = 8, },
	{ },
};
DIV_CLK(apb1_clk, CLK_APB1,			/* id */
    "apb1", apb1_parents,			/* name, parents */
    0x54,					/* offset */
    8, 2,					/* shift, mask */
    CLK_DIV_WITH_TABLE,				/* flags */
    apb1_div_table);				/* div table */

/* Mux values 2 and 3 both select pll_periph. */
static const char *apb2_parents[] = {"osc32k", "osc24M", "pll_periph",
    "pll_periph"};
NM_CLK(apb2_clk, CLK_APB2,			/* id */
    "apb2", apb2_parents,			/* name, parents */
    0x58,					/* offset */
    16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO,	/* n factor */
    0, 5, 0, 0,					/* m factor */
    24, 2,					/* mux */
    0,						/* gate */
    AW_CLK_HAS_MUX);

static const char *mod_parents[] = {"osc24M", "pll_periph"};
NM_CLK(nand0_clk, CLK_NAND0,
    "nand0", mod_parents,			/* id, name, parents */
    0x80,					/* offset */
    16, 3, 0, AW_CLK_FACTOR_POWER_OF_TWO,	/* n factor */
    0, 4, 0, 0,					/* m factor */
    24, 2,					/* mux */
    31,						/* gate */
    AW_CLK_HAS_GATE | AW_CLK_HAS_MUX);		/* flags */

/*
 * NAND1 has its own clock register at 0x84 (A31 manual: NAND0_CLK_REG
 * is 0x080, NAND1_CLK_REG is 0x084; Linux ccu-sun6i-a31 agrees).  The
 * original erroneously pointed this at 0x80, i.e. the NAND0 register,
 * so programming nand1 silently reprogrammed nand0.
 */
NM_CLK(nand1_clk, CLK_NAND1,
    "nand1", mod_parents,			/* id, name, parents */
    0x84,					/* offset */
    16, 3, 0, AW_CLK_FACTOR_POWER_OF_TWO,	/* n factor */
    0, 4, 0, 0,					/* m factor */
    24, 2,					/* mux */
    31,						/* gate */
    AW_CLK_HAS_GATE | AW_CLK_HAS_MUX);		/* flags */

NM_CLK(mmc0_clk, CLK_MMC0,
    "mmc0", mod_parents,			/* id, name, parents */
    0x88,					/* offset */
    16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO,	/* n factor */
    0, 4, 0, 0,					/* m factor */
    24, 2,					/* mux */
    31,						/* gate */
    AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
    AW_CLK_REPARENT);				/* flags */
NM_CLK(mmc1_clk, CLK_MMC1, "mmc1", mod_parents, /* id, name, parents */ 0x8c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc2_clk, CLK_MMC2, "mmc2", mod_parents, /* id, name, parents */ 0x90, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc3_clk, CLK_MMC2, "mmc3", mod_parents, /* id, name, parents */ 0x94, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *ts_parents[] = {"osc24M", "pll_periph"}; NM_CLK(ts_clk, CLK_TS, "ts", ts_parents, /* id, name, parents */ 0x98, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(ss_clk, CLK_SS, "ss", mod_parents, /* id, name, parents */ 0x9C, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi0_clk, CLK_SPI0, "spi0", mod_parents, /* id, name, parents */ 0xA0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi1_clk, CLK_SPI1, "spi1", mod_parents, /* id, name, parents */ 0xA4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi2_clk, CLK_SPI2, "spi2", mod_parents, /* id, name, parents */ 0xA8, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 
4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi3_clk, CLK_SPI3, "spi3", mod_parents, /* id, name, parents */ 0xAC, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ static const char *daudio_parents[] = {"pll_audio-8x", "pll_audio-4x", "pll_audio-2x", "pll_audio"}; MUX_CLK(daudio0mux_clk, 0, "daudio0mux", daudio_parents, 0xb0, 16, 2); MUX_CLK(daudio1mux_clk, 0, "daudio1mux", daudio_parents, 0xb4, 16, 2); static const char *mdfs_parents[] = {"pll_ddr", "pll_periph"}; NM_CLK(mdfs_clk, CLK_MDFS, "mdfs", mdfs_parents, /* id, name, parents */ 0xF0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ static const char *dram_parents[] = {"pll_ddr", "pll_periph"}; NM_CLK(sdram0_clk, CLK_SDRAM0, "sdram0", dram_parents, /* id, name, parents */ 0xF4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 4, 1, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ NM_CLK(sdram1_clk, CLK_SDRAM1, "sdram1", dram_parents, /* id, name, parents */ 0xF4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 8, 4, 0, 0, /* m factor */ 12, 1, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ static const char *befe_parents[] = {"pll_video0", "pll_video1", "pll_periph-2x", "pll_gpu", "pll9", "pll10"}; NM_CLK(be0_clk, CLK_BE0, "be0", befe_parents, /* id, name, parents */ 0x104, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(be1_clk, CLK_BE1, "be1", befe_parents, /* id, name, parents */ 0x108, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* 
gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(fe0_clk, CLK_FE0, "fe0", befe_parents, /* id, name, parents */ 0x104, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(fe1_clk, CLK_FE1, "fe1", befe_parents, /* id, name, parents */ 0x108, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *mp_parents[] = {"pll_video0", "pll_video1", "pll9", "pll10"}; NM_CLK(mp_clk, CLK_MP, "mp", mp_parents, /* id, name, parents */ 0x108, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *lcd_ch0_parents[] = {"pll_video0", "pll_video1", "pll_video0-2x", "pll_video1-2x", "pll_mipi"}; NM_CLK(lcd0_ch0_clk, CLK_LCD0_CH0, "lcd0_ch0", lcd_ch0_parents, /* id, name, parents */ 0x118, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake )*/ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(lcd1_ch0_clk, CLK_LCD1_CH0, "lcd1_ch0", lcd_ch0_parents, /* id, name, parents */ 0x11C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake )*/ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *lcd_ch1_parents[] = {"pll_video0", "pll_video1", "pll_video0-2x", "pll_video1-2x"}; NM_CLK(lcd0_ch1_clk, CLK_LCD0_CH1, "lcd0_ch1", lcd_ch1_parents, /* id, name, parents */ 0x12C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(lcd1_ch1_clk, CLK_LCD1_CH1, "lcd1_ch1", 
lcd_ch1_parents, /* id, name, parents */ 0x130, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ /* CSI0 0x134 Need Mux table */ /* CSI1 0x138 Need Mux table */ static const char *ve_parents[] = {"pll_ve"}; NM_CLK(ve_clk, CLK_VE, "ve", ve_parents, /* id, name, parents */ 0x13C, /* offset */ 16, 3, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ NM_CLK(hdmi_clk, CLK_HDMI, "hdmi", lcd_ch1_parents, /* id, name, parents */ 0x150, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static const char *mbus_parents[] = {"osc24M", "pll_periph", "pll_ddr"}; NM_CLK(mbus0_clk, CLK_MBUS0, "mbus0", mbus_parents, /* id, name, parents */ 0x15C, /* offset */ 16, 2, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(mbus1_clk, CLK_MBUS1, "mbus1", mbus_parents, /* id, name, parents */ 0x160, /* offset */ 16, 2, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *mipi_parents[] = {"pll_video0", "pll_video1", "pll_video0-2x", "pll_video1-2x"}; NM_CLK(mipi_dsi_clk, CLK_MIPI_DSI, "mipi_dsi", mipi_parents, /* id, name, parents */ 0x168, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 16, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(mipi_dsi_dphy_clk, CLK_MIPI_DSI_DPHY, "mipi_dsi_dphy", mipi_parents, /* id, name, parents */ 0x168, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 8, 2, /* mux */ 15, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(mipi_csi_dphy_clk, 
CLK_MIPI_CSI_DPHY, "mipi_csi_dphy", mipi_parents, /* id, name, parents */ 0x16C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 8, 2, /* mux */ 15, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *iep_parents[] = {"pll_video0", "pll_video1", "pll_periph-2x", "pll_gpu", "pll9", "pll10"}; NM_CLK(iep_drc0_clk, CLK_IEP_DRC0, "iep_drc0", iep_parents, /* id, name, parents */ 0x180, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(iep_drc1_clk, CLK_IEP_DRC1, "iep_drc1", iep_parents, /* id, name, parents */ 0x184, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(iep_deu0_clk, CLK_IEP_DEU0, "iep_deu0", iep_parents, /* id, name, parents */ 0x188, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ NM_CLK(iep_deu1_clk, CLK_IEP_DEU1, "iep_deu1", iep_parents, /* id, name, parents */ 0x18C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *gpu_parents[] = {"pll_gpu", "pll_periph-2x", "pll_video0", "pll_video1", "pll9", "pll10"}; PREDIV_CLK(gpu_core_clk, CLK_GPU_CORE, /* id */ "gpu_core", gpu_parents, /* name, parents */ 0x1A0, /* offset */ 24, 3, /* mux */ 0, 3, 0, 0, /* div */ 0, 0, 3, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 24, 2, 1); /* prediv condition */ PREDIV_CLK(gpu_memory_clk, CLK_GPU_MEMORY, /* id */ "gpu_memory", gpu_parents, /* name, parents */ 0x1A4, /* offset */ 24, 3, /* mux */ 0, 3, 0, 0, /* div */ 0, 0, 3, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 
24, 2, 1); /* prediv condition */ PREDIV_CLK(gpu_hyd_clk, CLK_GPU_HYD, /* id */ "gpu_hyd", gpu_parents, /* name, parents */ 0x1A8, /* offset */ 24, 3, /* mux */ 0, 3, 0, 0, /* div */ 0, 0, 3, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 24, 2, 1); /* prediv condition */ /* ATS 0x1B0 */ /* Trace 0x1B4 */ static struct aw_ccung_clk a31_ccu_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_cpu_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_audio_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_mipi_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video0_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_ve_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video1_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_gpu_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll9_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll10_clk}, { .type = AW_CLK_NM, .clk.nm = &apb2_clk}, { .type = AW_CLK_NM, .clk.nm = &nand0_clk}, { .type = AW_CLK_NM, .clk.nm = &nand1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc3_clk}, { .type = AW_CLK_NM, .clk.nm = &ts_clk}, { .type = AW_CLK_NM, .clk.nm = &ss_clk}, { .type = AW_CLK_NM, .clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_NM, .clk.nm = &spi2_clk}, { .type = AW_CLK_NM, .clk.nm = &spi3_clk}, { .type = AW_CLK_NM, .clk.nm = &mdfs_clk}, { .type = AW_CLK_NM, .clk.nm = &sdram0_clk}, { .type = AW_CLK_NM, .clk.nm = &sdram1_clk}, { .type = AW_CLK_NM, .clk.nm = &be0_clk}, { .type = AW_CLK_NM, .clk.nm = &be1_clk}, { .type = AW_CLK_NM, .clk.nm = &fe0_clk}, { .type = AW_CLK_NM, .clk.nm = &fe1_clk}, { .type = AW_CLK_NM, .clk.nm = &mp_clk}, { .type = AW_CLK_NM, .clk.nm = &lcd0_ch0_clk}, { .type = AW_CLK_NM, .clk.nm = &lcd1_ch0_clk}, { .type = AW_CLK_NM, .clk.nm = &lcd0_ch1_clk}, { .type = AW_CLK_NM, .clk.nm = 
&lcd1_ch1_clk}, { .type = AW_CLK_NM, .clk.nm = &ve_clk}, { .type = AW_CLK_NM, .clk.nm = &hdmi_clk}, { .type = AW_CLK_NM, .clk.nm = &mbus0_clk}, { .type = AW_CLK_NM, .clk.nm = &mbus1_clk}, { .type = AW_CLK_NM, .clk.nm = &mipi_dsi_clk}, { .type = AW_CLK_NM, .clk.nm = &mipi_dsi_dphy_clk}, { .type = AW_CLK_NM, .clk.nm = &mipi_csi_dphy_clk}, { .type = AW_CLK_NM, .clk.nm = &iep_drc0_clk}, { .type = AW_CLK_NM, .clk.nm = &iep_drc1_clk}, { .type = AW_CLK_NM, .clk.nm = &iep_deu0_clk}, { .type = AW_CLK_NM, .clk.nm = &iep_deu1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &gpu_core_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &gpu_memory_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &gpu_hyd_clk}, { .type = AW_CLK_DIV, .clk.div = &axi_clk}, { .type = AW_CLK_DIV, .clk.div = &apb1_clk}, { .type = AW_CLK_MUX, .clk.mux = &cpu_clk}, { .type = AW_CLK_MUX, .clk.mux = &daudio0mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &daudio1mux_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_4x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_8x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video0_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video1_2x_clk}, }; static int ccu_a31_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun6i-a31-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner A31 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_a31_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = a31_ccu_resets; sc->nresets = nitems(a31_ccu_resets); sc->gates = a31_ccu_gates; sc->ngates = nitems(a31_ccu_gates); sc->clks = a31_ccu_clks; sc->nclks = nitems(a31_ccu_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a31ng_methods[] = { /* Device interface 
*/ DEVMETHOD(device_probe, ccu_a31_probe), DEVMETHOD(device_attach, ccu_a31_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a31ng, ccu_a31ng_driver, ccu_a31ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a31ng, simplebus, ccu_a31ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_a64.c b/sys/dev/clk/allwinner/ccu_a64.c index 73cbe147c398..779b20a19d3e 100644 --- a/sys/dev/clk/allwinner/ccu_a64.c +++ b/sys/dev/clk/allwinner/ccu_a64.c @@ -1,836 +1,836 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_OSC_12M 0 #define CLK_PLL_CPUX 1 #define CLK_PLL_AUDIO_BASE 2 #define CLK_PLL_AUDIO 3 #define CLK_PLL_AUDIO_2X 4 #define CLK_PLL_AUDIO_4X 5 #define CLK_PLL_AUDIO_8X 6 #define CLK_PLL_VIDEO0 7 #define CLK_PLL_VIDEO0_2X 8 #define CLK_PLL_VE 9 #define CLK_PLL_DDR0 10 #define CLK_PLL_PERIPH0_2X 12 #define CLK_PLL_PERIPH1 13 #define CLK_PLL_PERIPH1_2X 14 #define CLK_PLL_VIDEO1 15 #define CLK_PLL_GPU 16 #define CLK_PLL_MIPI 17 #define CLK_PLL_HSIC 18 #define CLK_PLL_DE 19 #define CLK_PLL_DDR1 20 #define CLK_CPUX 21 #define CLK_AXI 22 #define CLK_APB 23 #define CLK_AHB1 24 #define CLK_APB1 25 #define CLK_APB2 26 #define CLK_AHB2 27 #define CLK_DRAM 94 #define CLK_MBUS 112 static struct aw_ccung_reset a64_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0x0cc, 0) CCU_RESET(RST_USB_PHY1, 0x0cc, 1) CCU_RESET(RST_USB_HSIC, 0x0cc, 2) CCU_RESET(RST_BUS_MIPI_DSI, 0x2c0, 1) CCU_RESET(RST_BUS_CE, 0x2c0, 5) CCU_RESET(RST_BUS_DMA, 0x2c0, 6) CCU_RESET(RST_BUS_MMC0, 0x2c0, 8) CCU_RESET(RST_BUS_MMC1, 0x2c0, 9) CCU_RESET(RST_BUS_MMC2, 0x2c0, 10) CCU_RESET(RST_BUS_NAND, 0x2c0, 13) CCU_RESET(RST_BUS_DRAM, 0x2c0, 14) CCU_RESET(RST_BUS_EMAC, 0x2c0, 17) CCU_RESET(RST_BUS_TS, 0x2c0, 18) CCU_RESET(RST_BUS_HSTIMER, 0x2c0, 19) CCU_RESET(RST_BUS_SPI0, 0x2c0, 20) CCU_RESET(RST_BUS_SPI1, 0x2c0, 21) CCU_RESET(RST_BUS_OTG, 0x2c0, 23) CCU_RESET(RST_BUS_EHCI0, 0x2c0, 24) CCU_RESET(RST_BUS_EHCI1, 0x2c0, 25) CCU_RESET(RST_BUS_OHCI0, 0x2c0, 28) CCU_RESET(RST_BUS_OHCI1, 0x2c0, 29) CCU_RESET(RST_BUS_VE, 0x2c4, 0) CCU_RESET(RST_BUS_TCON0, 0x2c4, 3) CCU_RESET(RST_BUS_TCON1, 0x2c4, 4) CCU_RESET(RST_BUS_DEINTERLACE, 0x2c4, 5) CCU_RESET(RST_BUS_CSI, 0x2c4, 8) CCU_RESET(RST_BUS_HDMI0, 0x2c4, 10) CCU_RESET(RST_BUS_HDMI1, 0x2c4, 11) CCU_RESET(RST_BUS_DE, 0x2c4, 12) CCU_RESET(RST_BUS_GPU, 0x2c4, 20) 
CCU_RESET(RST_BUS_MSGBOX, 0x2c4, 21) CCU_RESET(RST_BUS_SPINLOCK, 0x2c4, 22) CCU_RESET(RST_BUS_DBG, 0x2c4, 31) CCU_RESET(RST_BUS_LVDS, 0x2C8, 31) CCU_RESET(RST_BUS_CODEC, 0x2D0, 0) CCU_RESET(RST_BUS_SPDIF, 0x2D0, 1) CCU_RESET(RST_BUS_THS, 0x2D0, 8) CCU_RESET(RST_BUS_I2S0, 0x2D0, 12) CCU_RESET(RST_BUS_I2S1, 0x2D0, 13) CCU_RESET(RST_BUS_I2S2, 0x2D0, 14) CCU_RESET(RST_BUS_I2C0, 0x2D8, 0) CCU_RESET(RST_BUS_I2C1, 0x2D8, 1) CCU_RESET(RST_BUS_I2C2, 0x2D8, 2) CCU_RESET(RST_BUS_SCR, 0x2D8, 5) CCU_RESET(RST_BUS_UART0, 0x2D8, 16) CCU_RESET(RST_BUS_UART1, 0x2D8, 17) CCU_RESET(RST_BUS_UART2, 0x2D8, 18) CCU_RESET(RST_BUS_UART3, 0x2D8, 19) CCU_RESET(RST_BUS_UART4, 0x2D8, 20) }; static struct aw_ccung_gate a64_ccu_gates[] = { CCU_GATE(CLK_BUS_MIPI_DSI, "bus-mipi-dsi", "ahb1", 0x60, 1) CCU_GATE(CLK_BUS_CE, "bus-ce", "ahb1", 0x60, 5) CCU_GATE(CLK_BUS_DMA, "bus-dma", "ahb1", 0x60, 6) CCU_GATE(CLK_BUS_MMC0, "bus-mmc0", "ahb1", 0x60, 8) CCU_GATE(CLK_BUS_MMC1, "bus-mmc1", "ahb1", 0x60, 9) CCU_GATE(CLK_BUS_MMC2, "bus-mmc2", "ahb1", 0x60, 10) CCU_GATE(CLK_BUS_NAND, "bus-nand", "ahb1", 0x60, 13) CCU_GATE(CLK_BUS_DRAM, "bus-dram", "ahb1", 0x60, 14) CCU_GATE(CLK_BUS_EMAC, "bus-emac", "ahb2", 0x60, 16) CCU_GATE(CLK_BUS_TS, "bus-ts", "ahb1", 0x60, 18) CCU_GATE(CLK_BUS_HSTIMER, "bus-hstimer", "ahb1", 0x60, 19) CCU_GATE(CLK_BUS_SPI0, "bus-spi0", "ahb1", 0x60, 20) CCU_GATE(CLK_BUS_SPI1, "bus-spi1", "ahb1", 0x60, 21) CCU_GATE(CLK_BUS_OTG, "bus-otg", "ahb1", 0x60, 23) CCU_GATE(CLK_BUS_EHCI0, "bus-ehci0", "ahb1", 0x60, 24) CCU_GATE(CLK_BUS_EHCI1, "bus-ehci1", "ahb2", 0x60, 25) CCU_GATE(CLK_BUS_OHCI0, "bus-ohci0", "ahb1", 0x60, 28) CCU_GATE(CLK_BUS_OHCI1, "bus-ohci1", "ahb2", 0x60, 29) CCU_GATE(CLK_BUS_VE, "bus-ve", "ahb1", 0x64, 0) CCU_GATE(CLK_BUS_TCON0, "bus-tcon0", "ahb1", 0x64, 3) CCU_GATE(CLK_BUS_TCON1, "bus-tcon1", "ahb1", 0x64, 4) CCU_GATE(CLK_BUS_DEINTERLACE, "bus-deinterlace", "ahb1", 0x64, 5) CCU_GATE(CLK_BUS_CSI, "bus-csi", "ahb1", 0x64, 8) CCU_GATE(CLK_BUS_HDMI, "bus-hdmi", "ahb1", 0x64, 
11) CCU_GATE(CLK_BUS_DE, "bus-de", "ahb1", 0x64, 12) CCU_GATE(CLK_BUS_GPU, "bus-gpu", "ahb1", 0x64, 20) CCU_GATE(CLK_BUS_MSGBOX, "bus-msgbox", "ahb1", 0x64, 21) CCU_GATE(CLK_BUS_SPINLOCK, "bus-spinlock", "ahb1", 0x64, 22) CCU_GATE(CLK_BUS_CODEC, "bus-codec", "apb1", 0x68, 0) CCU_GATE(CLK_BUS_SPDIF, "bus-spdif", "apb1", 0x68, 1) CCU_GATE(CLK_BUS_PIO, "bus-pio", "apb1", 0x68, 5) CCU_GATE(CLK_BUS_THS, "bus-ths", "apb1", 0x68, 8) CCU_GATE(CLK_BUS_I2S0, "bus-i2s0", "apb1", 0x68, 12) CCU_GATE(CLK_BUS_I2S1, "bus-i2s1", "apb1", 0x68, 13) CCU_GATE(CLK_BUS_I2S2, "bus-i2s2", "apb1", 0x68, 14) CCU_GATE(CLK_BUS_I2C0, "bus-i2c0", "apb2", 0x6C, 0) CCU_GATE(CLK_BUS_I2C1, "bus-i2c1", "apb2", 0x6C, 1) CCU_GATE(CLK_BUS_I2C2, "bus-i2c2", "apb2", 0x6C, 2) CCU_GATE(CLK_BUS_SCR, "bus-src", "apb2", 0x6C, 5) CCU_GATE(CLK_BUS_UART0, "bus-uart0", "apb2", 0x6C, 16) CCU_GATE(CLK_BUS_UART1, "bus-uart1", "apb2", 0x6C, 17) CCU_GATE(CLK_BUS_UART2, "bus-uart2", "apb2", 0x6C, 18) CCU_GATE(CLK_BUS_UART3, "bus-uart3", "apb2", 0x6C, 19) CCU_GATE(CLK_BUS_UART4, "bus-uart4", "apb2", 0x6C, 20) CCU_GATE(CLK_BUS_DBG, "bus-dbg", "ahb1", 0x70, 7) CCU_GATE(CLK_THS, "ths", "thsdiv", 0x74, 31) CCU_GATE(CLK_USB_PHY0, "usb-phy0", "osc24M", 0xcc, 8) CCU_GATE(CLK_USB_PHY1, "usb-phy1", "osc24M", 0xcc, 9) CCU_GATE(CLK_USB_HSIC, "usb-hsic", "pll_hsic", 0xcc, 10) CCU_GATE(CLK_USB_HSIC_12M, "usb-hsic-12M", "osc12M", 0xcc, 11) CCU_GATE(CLK_USB_OHCI0, "usb-ohci0", "osc12M", 0xcc, 16) CCU_GATE(CLK_USB_OHCI1, "usb-ohci1", "usb-ohci0", 0xcc, 17) CCU_GATE(CLK_DRAM_VE, "dram-ve", "dram", 0x100, 0) CCU_GATE(CLK_DRAM_CSI, "dram-csi", "dram", 0x100, 1) CCU_GATE(CLK_DRAM_DEINTERLACE, "dram-deinterlace", "dram", 0x100, 2) CCU_GATE(CLK_DRAM_TS, "dram-ts", "dram", 0x100, 3) CCU_GATE(CLK_CSI_MISC, "csi-misc", "osc24M", 0x130, 31) CCU_GATE(CLK_AC_DIG_4X, "ac-dig-4x", "pll_audio-4x", 0x140, 30) CCU_GATE(CLK_AC_DIG, "ac-dig", "pll_audio", 0x140, 31) CCU_GATE(CLK_AVS, "avs", "osc24M", 0x144, 31) CCU_GATE(CLK_HDMI_DDC, "hdmi-ddc", "osc24M", 
0x154, 31) }; static const char *osc12m_parents[] = {"osc24M"}; FIXED_CLK(osc12m_clk, CLK_OSC_12M, /* id */ "osc12M", /* name */ osc12m_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_cpux_parents[] = {"osc24M"}; NKMP_CLK(pll_cpux_clk, CLK_PLL_CPUX, /* id */ "pll_cpux", pll_cpux_parents, /* name, parents */ 0x00, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK | AW_CLK_SCALE_CHANGE); /* flags */ static const char *pll_audio_parents[] = {"osc24M"}; NKMP_CLK(pll_audio_clk, CLK_PLL_AUDIO, /* id */ "pll_audio", pll_audio_parents, /* name, parents */ 0x08, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 5, 0, 0, /* m factor */ 16, 4, 0, 0, /* p factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_audio_mult_parents[] = {"pll_audio"}; FIXED_CLK(pll_audio_2x_clk, CLK_PLL_AUDIO_2X, /* id */ "pll_audio-2x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ FIXED_CLK(pll_audio_4x_clk, CLK_PLL_AUDIO_4X, /* id */ "pll_audio-4x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 4, /* mult */ 1, /* div */ 0); /* flags */ FIXED_CLK(pll_audio_8x_clk, CLK_PLL_AUDIO_8X, /* id */ "pll_audio-8x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 8, /* mult */ 1, /* div */ 0); /* flags */ static const char *pll_video0_parents[] = {"osc24M"}; FRAC_CLK(pll_video0_clk, CLK_PLL_VIDEO0, /* id */ "pll_video0", pll_video0_parents, /* name, parents */ 0x10, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 
600000000); /* min freq, max freq */ static const char *pll_video0_2x_parents[] = {"pll_video0"}; FIXED_CLK(pll_video0_2x_clk, CLK_PLL_VIDEO0_2X, /* id */ "pll_video0-2x", /* name */ pll_video0_2x_parents, /* parent */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ static const char *pll_ve_parents[] = {"osc24M"}; FRAC_CLK(pll_ve_clk, CLK_PLL_VE, /* id */ "pll_ve", pll_ve_parents, /* name, parents */ 0x18, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_ddr0_parents[] = {"osc24M"}; NKMP_CLK_WITH_UPDATE(pll_ddr0_clk, CLK_PLL_DDR0, /* id */ "pll_ddr0", pll_ddr0_parents, /* name, parents */ 0x20, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ 20, /* update */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_periph0_2x_parents[] = {"osc24M"}; static const char *pll_periph0_parents[] = {"pll_periph0_2x"}; NKMP_CLK(pll_periph0_2x_clk, CLK_PLL_PERIPH0_2X, /* id */ "pll_periph0_2x", pll_periph0_2x_parents, /* name, parents */ 0x28, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 2, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ FIXED_CLK(pll_periph0_clk, CLK_PLL_PERIPH0, /* id */ "pll_periph0", /* name */ pll_periph0_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_periph1_2x_parents[] = {"osc24M"}; static const char *pll_periph1_parents[] = {"pll_periph1_2x"}; NKMP_CLK(pll_periph1_2x_clk, CLK_PLL_PERIPH1_2X, /* id */ "pll_periph1_2x", pll_periph1_2x_parents, /* name, 
parents */ 0x2C, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 2, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ FIXED_CLK(pll_periph1_clk, CLK_PLL_PERIPH1, /* id */ "pll_periph1", /* name */ pll_periph1_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_video1_parents[] = {"osc24M"}; FRAC_CLK(pll_video1_clk, CLK_PLL_VIDEO1, /* id */ "pll_video1", pll_video1_parents, /* name, parents */ 0x30, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_gpu_parents[] = {"osc24M"}; FRAC_CLK(pll_gpu_clk, CLK_PLL_GPU, /* id */ "pll_gpu", pll_gpu_parents, /* name, parents */ 0x38, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_mipi_parents[] = {"pll_video0"}; MIPI_CLK(pll_mipi_clk, CLK_PLL_MIPI, "pll_mipi", pll_mipi_parents, 0x40, 4, 2, AW_CLK_FACTOR_MIN_VALUE, 2, 0, 3, 8, 4, 31, 28); static const char *pll_hsic_parents[] = {"osc24M"}; FRAC_CLK(pll_hsic_clk, CLK_PLL_HSIC, /* id */ "pll_hsic", pll_hsic_parents, /* name, parents */ 0x44, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_de_parents[] = {"osc24M"}; FRAC_CLK(pll_de_clk, CLK_PLL_DE, /* id */ "pll_de", pll_de_parents, 
/* name, parents */ 0x48, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_ddr1_parents[] = {"osc24M"}; NKMP_CLK_WITH_UPDATE(pll_ddr1_clk, CLK_PLL_DDR1, /* id */ "pll_ddr1", pll_ddr1_parents, /* name, parents */ 0x4C, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 2, 0, 0, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ 20, /* update */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *cpux_parents[] = {"osc32k", "osc24M", "pll_cpux"}; MUX_CLK(cpux_clk, CLK_CPUX, /* id */ "cpux", cpux_parents, /* name, parents */ 0x50, 16, 2); /* offset, shift, width */ static const char *axi_parents[] = {"cpux"}; DIV_CLK(axi_clk, CLK_AXI, /* id */ "axi", axi_parents, /* name, parents */ 0x50, /* offset */ 0, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *apb_parents[] = {"cpux"}; DIV_CLK(apb_clk, CLK_APB, /* id */ "apb", apb_parents, /* name, parents */ 0x50, /* offset */ 8, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *ahb1_parents[] = {"osc32k", "osc24M", "axi", "pll_periph0"}; PREDIV_CLK(ahb1_clk, CLK_AHB1, /* id */ "ahb1", ahb1_parents, /* name, parents */ 0x54, /* offset */ 12, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 6, 2, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 12, 2, 3); /* prediv condition */ static const char *apb1_parents[] = {"ahb1"}; static struct clk_div_table apb1_div_table[] = { { .value = 0, .divider = 2, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 4, }, { .value = 3, .divider = 8, }, { }, }; DIV_CLK(apb1_clk, CLK_APB1, /* id */ "apb1", apb1_parents, /* name, parents */ 0x54, /* offset */ 8, 2, /* shift, width */ 
CLK_DIV_WITH_TABLE, /* flags */ apb1_div_table); /* div table */ static const char *apb2_parents[] = {"osc32k", "osc24M", "pll_periph0_2x", "pll_periph0_2x"}; NM_CLK(apb2_clk, CLK_APB2, /* id */ "apb2", apb2_parents, /* name, parents */ 0x58, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); static const char *ahb2_parents[] = {"ahb1", "pll_periph0"}; PREDIV_CLK(ahb2_clk, CLK_AHB2, /* id */ "ahb2", ahb2_parents, /* name, parents */ 0x5c, /* offset */ 0, 2, /* mux */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* div */ 0, 0, 2, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 0, 2, 1); /* prediv condition */ static const char *ths_parents[] = {"osc24M"}; static struct clk_div_table ths_div_table[] = { { .value = 0, .divider = 1, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 4, }, { .value = 3, .divider = 6, }, { }, }; DIV_CLK(ths_clk, 0, /* id */ "thsdiv", ths_parents, /* name, parents */ 0x74, /* offset */ 0, 2, /* div shift, div width */ CLK_DIV_WITH_TABLE, /* flags */ ths_div_table); /* div table */ static const char *mod_parents[] = {"osc24M", "pll_periph0_2x", "pll_periph1_2x"}; NM_CLK(nand_clk, CLK_NAND, "nand", mod_parents, /* id, name, parents */ 0x80, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(mmc0_clk, CLK_MMC0, "mmc0", mod_parents, /* id, name, parents */ 0x88, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc1_clk, CLK_MMC1, "mmc1", mod_parents, /* id, name, parents */ 0x8c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ 
NM_CLK(mmc2_clk, CLK_MMC2, "mmc2", mod_parents, /* id, name, parents */ 0x90, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *ts_parents[] = {"osc24M", "pll_periph0"}; NM_CLK(ts_clk, CLK_TS, "ts", ts_parents, /* id, name, parents */ 0x98, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(ce_clk, CLK_CE, "ce", mod_parents, /* id, name, parents */ 0x9C, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi0_clk, CLK_SPI0, "spi0", mod_parents, /* id, name, parents */ 0xA0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(spi1_clk, CLK_SPI1, "spi1", mod_parents, /* id, name, parents */ 0xA4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *i2s_parents[] = {"pll_audio-8x", "pll_audio-4x", "pll_audio-2x", "pll_audio"}; MUX_CLK(i2s0mux_clk, 0, "i2s0mux", i2s_parents, /* id, name, parents */ 0xb0, 16, 2); /* offset, mux shift, mux width */ MUX_CLK(i2s1mux_clk, 0, "i2s1mux", i2s_parents, /* id, name, parents */ 0xb4, 16, 2); /* offset, mux shift, mux width */ MUX_CLK(i2s2mux_clk, 0, "i2s2mux", i2s_parents, /* id, name, parents */ 0xb8, 16, 2); /* offset, mux shift, mux width */ static const char *spdif_parents[] = {"pll_audio"}; M_CLK(spdif_clk, CLK_SPDIF, "spdif", spdif_parents, /* id, name, parents */ 0xC0, /* offset */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux 
*/ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ /* USBPHY clk sel */ /* DRAM needs update bit */ static const char *dram_parents[] = {"pll_ddr0", "pll_ddr1"}; M_CLK(dram_clk, CLK_DRAM, "dram", dram_parents, /* id, name, parents */ 0xF4, /* offset */ 0, 2, 0, 0, /* m factor */ 20, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ static const char *de_parents[] = {"pll_periph0_2x", "pll_de"}; M_CLK(de_clk, CLK_DE, "de", de_parents, /* id, name, parents */ 0x104, /* offset */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *tcon0_parents[] = {"pll_mipi", NULL, "pll_video0-2x"}; MUX_CLK(tcon0_clk, CLK_TCON0, /* id */ "tcon0", tcon0_parents, /* name, parents */ 0x118, 24, 2); /* offset, shift, width */ static const char *tcon1_parents[] = {"pll_video0", NULL, "pll_video1"}; M_CLK(tcon1_clk, CLK_TCON1, "tcon1", tcon1_parents, /* id, name, parents */ 0x11C, /* offset */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_SET_PARENT); /* flags */ static const char *deinterlace_parents[] = {"pll_periph0", "pll_periph1"}; M_CLK(deinterlace_clk, CLK_DEINTERLACE, "deinterlace", deinterlace_parents, /* id, name, parents */ 0x124, /* offset */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *csi_sclk_parents[] = {"pll_periph0", "pll_periph1"}; M_CLK(csi_sclk_clk, CLK_CSI_SCLK, "csi-sclk", csi_sclk_parents, /* id, name, parents */ 0x134, /* offset */ 16, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *csi_mclk_parents[] = {"osc24M", "pll_video0", "pll_periph1"}; M_CLK(csi_mclk_clk, CLK_CSI_MCLK, "csi-mclk", csi_mclk_parents, /* id, name, parents */ 0x134, /* offset */ 0, 4, 0, 0, /* m factor */ 8, 2, /* mux */ 15, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *ve_parents[] 
= {"pll_ve"}; M_CLK(ve_clk, CLK_VE, "ve", ve_parents, /* id, name, parents */ 0x13C, /* offset */ 16, 3, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static const char *hdmi_parents[] = {"pll_video0"}; M_CLK(hdmi_clk, CLK_HDMI, "hdmi", hdmi_parents, /* id, name, parents */ 0x150, /* offset */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_SET_PARENT); /* flags */ static const char *mbus_parents[] = {"osc24M", "pll_periph0_2x", "pll_ddr0"}; M_CLK(mbus_clk, CLK_MBUS, "mbus", mbus_parents, /* id, name, parents */ 0x15C, /* offset */ 0, 3, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *gpu_parents[] = {"pll_gpu"}; M_CLK(gpu_clk, CLK_GPU, "gpu", gpu_parents, /* id, name, parents */ 0x1A0, /* offset */ 0, 2, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static struct aw_ccung_clk a64_ccu_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_cpux_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_audio_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video0_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_ve_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr0_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph0_2x_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph1_2x_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video1_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_gpu_clk}, { .type = AW_CLK_MIPI, .clk.mipi = &pll_mipi_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_hsic_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_de_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr1_clk}, { .type = AW_CLK_NM, .clk.nm = &apb2_clk}, { .type = AW_CLK_NM, .clk.nm = &nand_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &ts_clk}, { .type = AW_CLK_NM, .clk.nm = &ce_clk}, { .type = AW_CLK_NM, 
.clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_M, .clk.m = &spdif_clk}, { .type = AW_CLK_M, .clk.m = &dram_clk}, { .type = AW_CLK_M, .clk.m = &de_clk}, { .type = AW_CLK_M, .clk.m = &tcon1_clk}, { .type = AW_CLK_M, .clk.m = &deinterlace_clk}, { .type = AW_CLK_M, .clk.m = &csi_sclk_clk}, { .type = AW_CLK_M, .clk.m = &csi_mclk_clk}, { .type = AW_CLK_M, .clk.m = &ve_clk}, { .type = AW_CLK_M, .clk.m = &hdmi_clk}, { .type = AW_CLK_M, .clk.m = &mbus_clk}, { .type = AW_CLK_M, .clk.m = &gpu_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb2_clk}, { .type = AW_CLK_MUX, .clk.mux = &cpux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s0mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s1mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s2mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &tcon0_clk}, { .type = AW_CLK_DIV, .clk.div = &axi_clk}, { .type = AW_CLK_DIV, .clk.div = &apb1_clk}, { .type = AW_CLK_DIV, .clk.div = &apb_clk}, { .type = AW_CLK_DIV, .clk.div = &ths_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &osc12m_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph0_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph1_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_4x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_8x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video0_2x_clk}, }; static struct aw_clk_init a64_init_clks[] = { {"ahb1", "pll_periph0", 0, false}, {"ahb2", "pll_periph0", 0, false}, {"dram", "pll_ddr0", 0, false}, {"pll_de", NULL, 432000000, true}, {"de", "pll_de", 0, true}, }; static int ccu_a64_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun50i-a64-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner A64 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_a64_attach(device_t dev) { struct aw_ccung_softc *sc; sc = 
device_get_softc(dev); sc->resets = a64_ccu_resets; sc->nresets = nitems(a64_ccu_resets); sc->gates = a64_ccu_gates; sc->ngates = nitems(a64_ccu_gates); sc->clks = a64_ccu_clks; sc->nclks = nitems(a64_ccu_clks); sc->clk_init = a64_init_clks; sc->n_clk_init = nitems(a64_init_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a64ng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_a64_probe), DEVMETHOD(device_attach, ccu_a64_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a64ng, ccu_a64ng_driver, ccu_a64ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a64ng, simplebus, ccu_a64ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_a83t.c b/sys/dev/clk/allwinner/ccu_a83t.c index 1cb49d99ccbc..bc94250ba4ac 100644 --- a/sys/dev/clk/allwinner/ccu_a83t.c +++ b/sys/dev/clk/allwinner/ccu_a83t.c @@ -1,781 +1,781 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017 Kyle Evans * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_PLL_C0CPUX 0 #define CLK_PLL_C1CPUX 1 #define CLK_PLL_AUDIO 2 #define CLK_PLL_VIDEO0 3 #define CLK_PLL_VE 4 #define CLK_PLL_DDR 5 #define CLK_PLL_GPU 7 #define CLK_PLL_HSIC 8 #define CLK_PLL_VIDEO1 10 #define CLK_AXI0 13 #define CLK_AXI1 14 #define CLK_AHB1 15 #define CLK_APB1 16 #define CLK_APB2 17 #define CLK_AHB2 18 #define CLK_CCI400 58 #define CLK_DRAM 82 #define CLK_MBUS 95 /* Non-exported fixed clocks */ #define CLK_OSC_12M 150 static struct aw_ccung_reset a83t_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0xcc, 0) CCU_RESET(RST_USB_PHY1, 0xcc, 1) CCU_RESET(RST_USB_HSIC, 0xcc, 2) CCU_RESET(RST_DRAM, 0xf4, 31) CCU_RESET(RST_MBUS, 0xfc, 31) CCU_RESET(RST_BUS_MIPI_DSI, 0x2c0, 1) CCU_RESET(RST_BUS_SS, 0x2c0, 5) CCU_RESET(RST_BUS_DMA, 0x2c0, 6) CCU_RESET(RST_BUS_MMC0, 0x2c0, 8) CCU_RESET(RST_BUS_MMC1, 0x2c0, 9) CCU_RESET(RST_BUS_MMC2, 0x2c0, 10) CCU_RESET(RST_BUS_NAND, 0x2c0, 13) CCU_RESET(RST_BUS_DRAM, 0x2c0, 14) CCU_RESET(RST_BUS_EMAC, 0x2c0, 17) CCU_RESET(RST_BUS_HSTIMER, 0x2c0, 19) CCU_RESET(RST_BUS_SPI0, 0x2c0, 20) CCU_RESET(RST_BUS_SPI1, 0x2c0, 21) CCU_RESET(RST_BUS_OTG, 0x2c0, 24) CCU_RESET(RST_BUS_EHCI0, 0x2c0, 26) CCU_RESET(RST_BUS_EHCI1, 0x2c0, 27) CCU_RESET(RST_BUS_OHCI0, 0x2c0, 29) CCU_RESET(RST_BUS_VE, 0x2c4, 0) CCU_RESET(RST_BUS_TCON0, 0x2c4, 4) 
CCU_RESET(RST_BUS_TCON1, 0x2c4, 5) CCU_RESET(RST_BUS_CSI, 0x2c4, 8) CCU_RESET(RST_BUS_HDMI0, 0x2c4, 10) CCU_RESET(RST_BUS_HDMI1, 0x2c4, 11) CCU_RESET(RST_BUS_DE, 0x2c4, 12) CCU_RESET(RST_BUS_GPU, 0x2c4, 20) CCU_RESET(RST_BUS_MSGBOX, 0x2c4, 21) CCU_RESET(RST_BUS_SPINLOCK, 0x2c4, 22) CCU_RESET(RST_BUS_LVDS, 0x2c8, 0) CCU_RESET(RST_BUS_SPDIF, 0x2d0, 1) CCU_RESET(RST_BUS_I2S0, 0x2d0, 12) CCU_RESET(RST_BUS_I2S1, 0x2d0, 13) CCU_RESET(RST_BUS_I2S2, 0x2d0, 14) CCU_RESET(RST_BUS_TDM, 0x2d0, 15) CCU_RESET(RST_BUS_I2C0, 0x2d8, 0) CCU_RESET(RST_BUS_I2C1, 0x2d8, 1) CCU_RESET(RST_BUS_I2C2, 0x2d8, 2) CCU_RESET(RST_BUS_UART0, 0x2d8, 16) CCU_RESET(RST_BUS_UART1, 0x2d8, 17) CCU_RESET(RST_BUS_UART2, 0x2d8, 18) CCU_RESET(RST_BUS_UART3, 0x2d8, 19) CCU_RESET(RST_BUS_UART4, 0x2d8, 20) }; static struct aw_ccung_gate a83t_ccu_gates[] = { CCU_GATE(CLK_BUS_MIPI_DSI, "bus-mipi-dsi", "ahb1", 0x60, 1) CCU_GATE(CLK_BUS_SS, "bus-ss", "ahb1", 0x60, 5) CCU_GATE(CLK_BUS_DMA, "bus-dma", "ahb1", 0x60, 6) CCU_GATE(CLK_BUS_MMC0, "bus-mmc0", "ahb1", 0x60, 8) CCU_GATE(CLK_BUS_MMC1, "bus-mmc1", "ahb1", 0x60, 9) CCU_GATE(CLK_BUS_MMC2, "bus-mmc2", "ahb1", 0x60, 10) CCU_GATE(CLK_BUS_NAND, "bus-nand", "ahb1", 0x60, 13) CCU_GATE(CLK_BUS_DRAM, "bus-dram", "ahb1", 0x60, 14) CCU_GATE(CLK_BUS_EMAC, "bus-emac", "ahb1", 0x60, 17) CCU_GATE(CLK_BUS_HSTIMER, "bus-hstimer", "ahb1", 0x60, 19) CCU_GATE(CLK_BUS_SPI0, "bus-spi0", "ahb1", 0x60, 20) CCU_GATE(CLK_BUS_SPI1, "bus-spi1", "ahb1", 0x60, 21) CCU_GATE(CLK_BUS_OTG, "bus-otg", "ahb1", 0x60, 24) CCU_GATE(CLK_BUS_EHCI0, "bus-ehci0", "ahb2", 0x60, 26) CCU_GATE(CLK_BUS_EHCI1, "bus-ehci1", "ahb2", 0x60, 27) CCU_GATE(CLK_BUS_OHCI0, "bus-ohci0", "ahb2", 0x60, 29) CCU_GATE(CLK_BUS_VE, "bus-ve", "ahb1", 0x64, 0) CCU_GATE(CLK_BUS_TCON0, "bus-tcon0", "ahb1", 0x64, 4) CCU_GATE(CLK_BUS_TCON1, "bus-tcon1", "ahb1", 0x64, 5) CCU_GATE(CLK_BUS_CSI, "bus-csi", "ahb1", 0x64, 8) CCU_GATE(CLK_BUS_HDMI, "bus-hdmi", "ahb1", 0x64, 11) CCU_GATE(CLK_BUS_DE, "bus-de", "ahb1", 0x64, 12) 
CCU_GATE(CLK_BUS_GPU, "bus-gpu", "ahb1", 0x64, 20) CCU_GATE(CLK_BUS_MSGBOX, "bus-msgbox", "ahb1", 0x64, 21) CCU_GATE(CLK_BUS_SPINLOCK, "bus-spinlock", "ahb1", 0x64, 22) CCU_GATE(CLK_BUS_SPDIF, "bus-spdif", "apb1", 0x68, 1) CCU_GATE(CLK_BUS_PIO, "bus-pio", "apb1", 0x68, 5) CCU_GATE(CLK_BUS_I2S0, "bus-i2s0", "apb1", 0x68, 12) CCU_GATE(CLK_BUS_I2S1, "bus-i2s1", "apb1", 0x68, 13) CCU_GATE(CLK_BUS_I2S2, "bus-i2s2", "apb1", 0x68, 14) CCU_GATE(CLK_BUS_TDM, "bus-tdm", "apb1", 0x68, 15) CCU_GATE(CLK_BUS_I2C0, "bus-i2c0", "apb2", 0x6c, 0) CCU_GATE(CLK_BUS_I2C1, "bus-i2c1", "apb2", 0x6c, 1) CCU_GATE(CLK_BUS_I2C2, "bus-i2c2", "apb2", 0x6c, 2) CCU_GATE(CLK_BUS_UART0, "bus-uart0", "apb2", 0x6c, 16) CCU_GATE(CLK_BUS_UART1, "bus-uart1", "apb2", 0x6c, 17) CCU_GATE(CLK_BUS_UART2, "bus-uart2", "apb2", 0x6c, 18) CCU_GATE(CLK_BUS_UART3, "bus-uart3", "apb2", 0x6c, 19) CCU_GATE(CLK_BUS_UART4, "bus-uart4", "apb2", 0x6c, 20) CCU_GATE(CLK_USB_PHY0, "usb-phy0", "osc24M", 0xcc, 8) CCU_GATE(CLK_USB_PHY1, "usb-phy1", "osc24M", 0xcc, 9) CCU_GATE(CLK_USB_HSIC, "usb-hsic", "pll_hsic", 0xcc, 10) CCU_GATE(CLK_USB_HSIC_12M, "usb-hsic-12M", "osc12M", 0xcc, 11) CCU_GATE(CLK_USB_OHCI0, "usb-ohci0", "osc12M", 0xcc, 16) CCU_GATE(CLK_DRAM_VE, "dram-ve", "dram", 0x100, 0) CCU_GATE(CLK_DRAM_CSI, "dram-csi", "dram", 0x100, 1) CCU_GATE(CLK_CSI_MISC, "csi-misc", "osc24M", 0x130, 16) CCU_GATE(CLK_MIPI_CSI, "mipi-csi", "osc24M", 0x130, 31) CCU_GATE(CLK_AVS, "avs", "osc24M", 0x144, 31) CCU_GATE(CLK_HDMI_SLOW, "hdmi-ddc", "osc24M", 0x154, 31) }; static const char *osc12m_parents[] = {"osc24M"}; FIXED_CLK(osc12m_clk, CLK_OSC_12M, /* id */ "osc12M", osc12m_parents, /* name, parents */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ /* CPU PLL are 24Mhz * N / P */ static const char *pll_c0cpux_parents[] = {"osc24M"}; static const char *pll_c1cpux_parents[] = {"osc24M"}; NKMP_CLK(pll_c0cpux_clk, CLK_PLL_C0CPUX, /* id */ "pll_c0cpux", pll_c0cpux_parents, /* name, parents */ 0x00, /* offset */ 8, 8, 0, 
AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 0, 0, /* lock */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_SCALE_CHANGE); /* flags */ NKMP_CLK(pll_c1cpux_clk, CLK_PLL_C1CPUX, /* id */ "pll_c1cpux", pll_c1cpux_parents, /* name, parents */ 0x04, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 0, 0, /* lock */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_SCALE_CHANGE); /* flags */ static const char *pll_audio_parents[] = {"osc24M"}; NKMP_CLK(pll_audio_clk, CLK_PLL_AUDIO, /* id */ "pll_audio", pll_audio_parents, /* name, parents */ 0x08, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 0, 0, /* m factor */ 18, 1, 0, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_video0_parents[] = {"osc24M"}; NKMP_CLK(pll_video0_clk, CLK_PLL_VIDEO0, /* id */ "pll_video0", pll_video0_parents, /* name, parents */ 0x10, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 0, 0, /* m factor */ 0, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_ve_parents[] = {"osc24M"}; NKMP_CLK(pll_ve_clk, CLK_PLL_VE, /* id */ "pll_ve", pll_ve_parents, /* name, parents */ 0x18, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 0, 0, /* m factor */ 18, 1, 0, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_ddr_parents[] = {"osc24M"}; NKMP_CLK(pll_ddr_clk, CLK_PLL_DDR, /* id */ "pll_ddr", pll_ddr_parents, /* 
name, parents */ 0x20, /* offset */ 8, 5, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 0, 0, /* m factor */ 18, 1, 0, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_periph_parents[] = {"osc24M"}; NKMP_CLK(pll_periph_clk, CLK_PLL_PERIPH, /* id */ "pll_periph", pll_periph_parents, /* name, parents */ 0x28, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 1, 0, /* m factor */ 18, 1, 1, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_gpu_parents[] = {"osc24M"}; NKMP_CLK(pll_gpu_clk, CLK_PLL_GPU, /* id */ "pll_gpu", pll_gpu_parents, /* name, parents */ 0x38, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 1, 0, /* m factor */ 18, 1, 1, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_hsic_parents[] = {"osc24M"}; NKMP_CLK(pll_hsic_clk, CLK_PLL_HSIC, /* id */ "pll_hsic", pll_hsic_parents, /* name, parents */ 0x44, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 1, 0, /* m factor */ 18, 1, 1, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_de_parents[] = {"osc24M"}; NKMP_CLK(pll_de_clk, CLK_PLL_DE, /* id */ "pll_de", pll_de_parents, /* name, parents */ 0x48, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 1, 0, /* m factor */ 18, 1, 1, 0, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *pll_video1_parents[] = {"osc24M"}; NKMP_CLK(pll_video1_clk, CLK_PLL_VIDEO1, /* id */ "pll_video1", pll_video1_parents, /* name, parents */ 0x4c, /* offset */ 8, 8, 0, AW_CLK_FACTOR_ZERO_BASED, /* n factor 
*/ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 16, 1, 1, 0, /* m factor */ 0, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 0, 0, /* lock */ AW_CLK_HAS_GATE); /* flags */ static const char *c0cpux_parents[] = {"osc24M", "pll_c0cpux"}; MUX_CLK(c0cpux_clk, CLK_C0CPUX, /* id */ "c0cpux", c0cpux_parents, /* name, parents */ 0x50, 12, 1); /* offset, shift, width */ static const char *c1cpux_parents[] = {"osc24M", "pll_c1cpux"}; MUX_CLK(c1cpux_clk, CLK_C1CPUX, /* id */ "c1cpux", c1cpux_parents, /* name, parents */ 0x50, 28, 1); /* offset, shift, width */ static const char *axi0_parents[] = {"c0cpux"}; DIV_CLK(axi0_clk, CLK_AXI0, /* id */ "axi0", axi0_parents, /* name, parents */ 0x50, /* offset */ 0, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *axi1_parents[] = {"c1cpux"}; DIV_CLK(axi1_clk, CLK_AXI1, /* id */ "axi1", axi1_parents, /* name, parents */ 0x50, /* offset */ 16, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *ahb1_parents[] = {"osc16M-d512", "osc24M", "pll_periph", "pll_periph"}; PREDIV_CLK_WITH_MASK(ahb1_clk, CLK_AHB1, /* id */ "ahb1", ahb1_parents, /* name, parents */ 0x54, /* offset */ 12, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 6, 2, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ (2 << 12), (2 << 12)); /* prediv condition */ static const char *apb1_parents[] = {"ahb1"}; DIV_CLK(apb1_clk, CLK_APB1, /* id */ "apb1", apb1_parents, /* name, parents */ 0x54, /* offset */ 8, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *apb2_parents[] = {"osc16M-d512", "osc24M", "pll_periph", "pll_periph"}; NM_CLK(apb2_clk, CLK_APB2, /* id */ "apb2", apb2_parents, /* name, parents */ 0x58, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); static const char *ahb2_parents[] = {"ahb1", "pll_periph"}; PREDIV_CLK(ahb2_clk, CLK_AHB2, /* id */ "ahb2", ahb2_parents, /* name, 
parents */ 0x5c, 0, 2, /* mux */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* div (fake) */ 0, 0, 2, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 0, 2, 1); /* prediv cond */ /* Actually has a divider, but we don't use it */ static const char *cci400_parents[] = {"osc24M", "pll_periph", "pll_hsic"}; MUX_CLK(cci400_clk, CLK_CCI400, /* id */ "cci400", cci400_parents, /* name, parents */ 0x78, 24, 2); /* offset, shift, width */ static const char *mod_parents[] = {"osc24M", "pll_periph"}; NM_CLK(nand_clk, CLK_NAND, /* id */ "nand", mod_parents, /* name, parents */ 0x80, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); NM_CLK(mmc0_clk, CLK_MMC0, /* id */ "mmc0", mod_parents, /* name, parents */ 0x88, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); NM_CLK(mmc1_clk, CLK_MMC1, /* id */ "mmc1", mod_parents, /* name, parents */ 0x8c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); NM_CLK(mmc2_clk, CLK_MMC2, /* id */ "mmc2", mod_parents, /* name, parents */ 0x90, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); NM_CLK(ss_clk, CLK_SS, /* id */ "ss", mod_parents, /* name, parents */ 0x9c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); NM_CLK(spi0_clk, CLK_SPI0, /* id */ "spi0", mod_parents, /* name, parents */ 0xa0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); 
NM_CLK(spi1_clk, CLK_SPI1, /* id */ "spi1", mod_parents, /* name, parents */ 0xa4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); static const char *daudio_parents[] = {"pll_audio"}; NM_CLK(i2s0_clk, CLK_I2S0, /* id */ "i2s0", daudio_parents, /* name, parents */ 0xb0, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); NM_CLK(i2s1_clk, CLK_I2S1, /* id */ "i2s1", daudio_parents, /* name, parents */ 0xb4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); NM_CLK(i2s2_clk, CLK_I2S2, /* id */ "i2s2", daudio_parents, /* name, parents */ 0xb8, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *tdm_parents[] = {"pll_audio"}; NM_CLK(tdm_clk, CLK_TDM, /* id */ "tdm", tdm_parents, /* name, parents */ 0xbc, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *spdif_parents[] = {"pll_audio"}; NM_CLK(spdif_clk, CLK_SPDIF, /* id */ "spdif", spdif_parents, /* name, parents */ 0xc0, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *dram_parents[] = {"pll_ddr"}; NM_CLK(dram_clk, CLK_DRAM, /* id */ "dram", dram_parents, /* name, parents */ 0xf4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 0, /* gate */ 0); static const char *tcon0_parents[] = {"pll_video0"}; MUX_CLK(tcon0_clk, CLK_TCON0, /* id */ "tcon0", tcon0_parents, /* name, parents */ 0x118, 24, 2); /* offset, shift, width */ static const char 
*tcon1_parents[] = {"pll_video1"}; NM_CLK(tcon1_clk, CLK_TCON1, /* id */ "tcon1", tcon1_parents, /* name, parents */ 0x11c, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *csi_mclk_parents[] = {"pll_de", "osc24M"}; NM_CLK(csi_mclk_clk, CLK_CSI_MCLK, /* id */ "csi-mclk", csi_mclk_parents, /* name, parents */ 0x134, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 8, 3, /* mux */ 15, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *csi_sclk_parents[] = {"pll_periph", "pll_ve"}; NM_CLK(csi_sclk_clk, CLK_CSI_SCLK, /* id */ "csi-sclk", csi_sclk_parents, /* name, parents */ 0x134, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 16, 4, 0, 0, /* m factor */ 24, 3, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *ve_parents[] = {"pll_ve"}; NM_CLK(ve_clk, CLK_VE, /* id */ "ve", ve_parents, /* name, parents */ 0x13c, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 16, 3, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *hdmi_parents[] = {"pll_video1"}; NM_CLK(hdmi_clk, CLK_HDMI, /* id */ "hdmi", hdmi_parents, /* name, parents */ 0x150, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *mbus_parents[] = {"osc24M", "pll_periph", "pll_ddr"}; NM_CLK(mbus_clk, CLK_MBUS, /* id */ "mbus", mbus_parents, /* name, parents */ 0x15c, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 3, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *mipi_dsi0_parents[] = {"pll_video0"}; NM_CLK(mipi_dsi0_clk, CLK_MIPI_DSI0, /* id */ "mipi-dsi0", mipi_dsi0_parents, /* name, parents */ 0x168, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* 
n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *mipi_dsi1_parents[] = {"osc24M", "pll_video0"}; NM_CLK(mipi_dsi1_clk, CLK_MIPI_DSI1, /* id */ "mipi-dsi1", mipi_dsi1_parents, /* name, parents */ 0x16c, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 4, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *gpu_core_parents[] = {"pll_gpu"}; NM_CLK(gpu_core_clk, CLK_GPU_CORE, /* id */ "gpu-core", gpu_core_parents, /* name, parents */ 0x1a0, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 3, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static const char *gpu_memory_parents[] = {"pll_gpu", "pll_periph"}; NM_CLK(gpu_memory_clk, CLK_GPU_MEMORY, /* id */ "gpu-memory", gpu_memory_parents, /* name, parents */ 0x1a4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 3, 0, 0, /* m factor */ 24, 1, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); static const char *gpu_hyd_parents[] = {"pll_gpu"}; NM_CLK(gpu_hyd_clk, CLK_GPU_HYD, /* id */ "gpu-hyd", gpu_hyd_parents, /* name, parents */ 0x1a0, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 3, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); static struct aw_ccung_clk a83t_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_audio_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_video0_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ve_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_gpu_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_hsic_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_de_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_video1_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_c0cpux_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_c1cpux_clk}, { .type = AW_CLK_NM, 
.clk.nm = &apb2_clk}, { .type = AW_CLK_NM, .clk.nm = &nand_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &ss_clk}, { .type = AW_CLK_NM, .clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_NM, .clk.nm = &i2s0_clk}, { .type = AW_CLK_NM, .clk.nm = &i2s1_clk}, { .type = AW_CLK_NM, .clk.nm = &i2s2_clk}, { .type = AW_CLK_NM, .clk.nm = &tdm_clk}, { .type = AW_CLK_NM, .clk.nm = &spdif_clk}, { .type = AW_CLK_NM, .clk.nm = &dram_clk}, { .type = AW_CLK_NM, .clk.nm = &tcon1_clk}, { .type = AW_CLK_NM, .clk.nm = &csi_mclk_clk}, { .type = AW_CLK_NM, .clk.nm = &csi_sclk_clk}, { .type = AW_CLK_NM, .clk.nm = &ve_clk}, { .type = AW_CLK_NM, .clk.nm = &hdmi_clk}, { .type = AW_CLK_NM, .clk.nm = &mbus_clk}, { .type = AW_CLK_NM, .clk.nm = &mipi_dsi0_clk}, { .type = AW_CLK_NM, .clk.nm = &mipi_dsi1_clk}, { .type = AW_CLK_NM, .clk.nm = &gpu_core_clk}, { .type = AW_CLK_NM, .clk.nm = &gpu_memory_clk}, { .type = AW_CLK_NM, .clk.nm = &gpu_hyd_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb2_clk}, { .type = AW_CLK_MUX, .clk.mux = &c0cpux_clk}, { .type = AW_CLK_MUX, .clk.mux = &c1cpux_clk}, { .type = AW_CLK_MUX, .clk.mux = &cci400_clk}, { .type = AW_CLK_MUX, .clk.mux = &tcon0_clk}, { .type = AW_CLK_DIV, .clk.div = &axi0_clk}, { .type = AW_CLK_DIV, .clk.div = &axi1_clk}, { .type = AW_CLK_DIV, .clk.div = &apb1_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &osc12m_clk}, }; static struct aw_clk_init a83t_init_clks[] = { {"ahb1", "pll_periph", 0, false}, {"ahb2", "ahb1", 0, false}, {"dram", "pll_ddr", 0, false}, }; static int ccu_a83t_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun8i-a83t-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner A83T Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int 
ccu_a83t_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = a83t_ccu_resets; sc->nresets = nitems(a83t_ccu_resets); sc->gates = a83t_ccu_gates; sc->ngates = nitems(a83t_ccu_gates); sc->clks = a83t_clks; sc->nclks = nitems(a83t_clks); sc->clk_init = a83t_init_clks; sc->n_clk_init = nitems(a83t_init_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a83tng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_a83t_probe), DEVMETHOD(device_attach, ccu_a83t_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a83tng, ccu_a83tng_driver, ccu_a83tng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a83tng, simplebus, ccu_a83tng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_de2.c b/sys/dev/clk/allwinner/ccu_de2.c index de490a88bafa..4bd29d139c11 100644 --- a/sys/dev/clk/allwinner/ccu_de2.c +++ b/sys/dev/clk/allwinner/ccu_de2.c @@ -1,233 +1,233 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #ifdef __aarch64__ #include "opt_soc.h" #endif -#include -#include -#include +#include +#include +#include #include #include #include #include enum CCU_DE2 { H3_CCU = 1, A64_CCU, }; /* Non exported clocks */ #define CLK_MIXER0_DIV 3 #define CLK_MIXER1_DIV 4 #define CLK_WB_DIV 5 static struct aw_ccung_reset h3_de2_ccu_resets[] = { CCU_RESET(RST_MIXER0, 0x08, 0) CCU_RESET(RST_WB, 0x08, 2) }; static struct aw_ccung_reset a64_de2_ccu_resets[] = { CCU_RESET(RST_MIXER0, 0x08, 0) CCU_RESET(RST_MIXER1, 0x08, 1) CCU_RESET(RST_WB, 0x08, 2) }; static struct aw_ccung_gate h3_de2_ccu_gates[] = { CCU_GATE(CLK_BUS_MIXER0, "mixer0", "mixer0-div", 0x00, 0) CCU_GATE(CLK_BUS_WB, "wb", "wb-div", 0x00, 2) CCU_GATE(CLK_MIXER0, "bus-mixer0", "bus-de", 0x04, 0) CCU_GATE(CLK_WB, "bus-wb", "bus-de", 0x04, 2) }; static struct aw_ccung_gate a64_de2_ccu_gates[] = { CCU_GATE(CLK_BUS_MIXER0, "mixer0", "mixer0-div", 0x00, 0) CCU_GATE(CLK_BUS_MIXER1, "mixer1", "mixer1-div", 0x00, 1) CCU_GATE(CLK_BUS_WB, "wb", "wb-div", 0x00, 2) CCU_GATE(CLK_MIXER0, "bus-mixer0", "bus-de", 0x04, 0) CCU_GATE(CLK_MIXER1, "bus-mixer1", "bus-de", 0x04, 1) CCU_GATE(CLK_WB, "bus-wb", "bus-de", 0x04, 2) }; static const char *div_parents[] = {"de"}; NM_CLK(mixer0_div_clk, CLK_MIXER0_DIV, /* id */ "mixer0-div", div_parents, /* names, parents */ 0x0C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* N factor (fake)*/ 0, 4, 0, 0, /* M flags 
*/ 0, 0, /* mux */ 0, /* gate */ AW_CLK_SCALE_CHANGE); /* flags */ NM_CLK(mixer1_div_clk, CLK_MIXER1_DIV, /* id */ "mixer1-div", div_parents, /* names, parents */ 0x0C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* N factor (fake)*/ 4, 4, 0, 0, /* M flags */ 0, 0, /* mux */ 0, /* gate */ AW_CLK_SCALE_CHANGE); /* flags */ NM_CLK(wb_div_clk, CLK_WB_DIV, /* id */ "wb-div", div_parents, /* names, parents */ 0x0C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* N factor (fake)*/ 8, 4, 0, 0, /* M flags */ 0, 0, /* mux */ 0, /* gate */ AW_CLK_SCALE_CHANGE); /* flags */ static struct aw_ccung_clk h3_de2_ccu_clks[] = { { .type = AW_CLK_NM, .clk.nm = &mixer0_div_clk}, { .type = AW_CLK_NM, .clk.nm = &wb_div_clk}, }; static struct aw_ccung_clk a64_de2_ccu_clks[] = { { .type = AW_CLK_NM, .clk.nm = &mixer0_div_clk}, { .type = AW_CLK_NM, .clk.nm = &mixer1_div_clk}, { .type = AW_CLK_NM, .clk.nm = &wb_div_clk}, }; static struct ofw_compat_data compat_data[] = { {"allwinner,sun8i-h3-de2-clk", H3_CCU}, {"allwinner,sun50i-a64-de2-clk", A64_CCU}, {NULL, 0} }; static int ccu_de2_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner DE2 Clock Control Unit"); return (BUS_PROBE_DEFAULT); } static int ccu_de2_attach(device_t dev) { struct aw_ccung_softc *sc; phandle_t node; clk_t mod, bus; hwreset_t rst_de; enum CCU_DE2 type; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); type = (enum CCU_DE2)ofw_bus_search_compatible(dev, compat_data)->ocd_data; switch (type) { case H3_CCU: sc->resets = h3_de2_ccu_resets; sc->nresets = nitems(h3_de2_ccu_resets); sc->gates = h3_de2_ccu_gates; sc->ngates = nitems(h3_de2_ccu_gates); sc->clks = h3_de2_ccu_clks; sc->nclks = nitems(h3_de2_ccu_clks); break; case A64_CCU: sc->resets = a64_de2_ccu_resets; sc->nresets = nitems(a64_de2_ccu_resets); sc->gates = a64_de2_ccu_gates; sc->ngates = nitems(a64_de2_ccu_gates); sc->clks = 
a64_de2_ccu_clks; sc->nclks = nitems(a64_de2_ccu_clks); break; } if (hwreset_get_by_ofw_idx(dev, node, 0, &rst_de) != 0) { device_printf(dev, "Cannot get de reset\n"); return (ENXIO); } if (hwreset_deassert(rst_de) != 0) { device_printf(dev, "Cannot de-assert de reset\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, node, "mod", &mod) != 0) { device_printf(dev, "Cannot get mod clock\n"); return (ENXIO); } if (clk_enable(mod) != 0) { device_printf(dev, "Cannot enable mod clock\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, node, "bus", &bus) != 0) { device_printf(dev, "Cannot get bus clock\n"); return (ENXIO); } if (clk_enable(bus) != 0) { device_printf(dev, "Cannot enable bus clock\n"); return (ENXIO); } return (aw_ccung_attach(dev)); } static device_method_t ccu_de2_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_de2_probe), DEVMETHOD(device_attach, ccu_de2_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_de2, ccu_de2_driver, ccu_de2_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_de2, simplebus, ccu_de2_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_LAST); diff --git a/sys/dev/clk/allwinner/ccu_h3.c b/sys/dev/clk/allwinner/ccu_h3.c index fb6e26542b8a..de538f720d4f 100644 --- a/sys/dev/clk/allwinner/ccu_h3.c +++ b/sys/dev/clk/allwinner/ccu_h3.c @@ -1,787 +1,787 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #if defined(__aarch64__) #include "opt_soc.h" #endif #include #include #include /* Non-exported resets */ #define RST_BUS_SCR 53 /* Non-exported clocks */ #define CLK_PLL_CPUX 0 #define CLK_PLL_AUDIO_BASE 1 #define CLK_PLL_AUDIO 2 #define CLK_PLL_AUDIO_2X 3 #define CLK_PLL_AUDIO_4X 4 #define CLK_PLL_AUDIO_8X 5 #define CLK_PLL_VIDEO 6 #define CLK_PLL_VE 7 #define CLK_PLL_DDR 8 #define CLK_PLL_PERIPH0_2X 10 #define CLK_PLL_GPU 11 #define CLK_PLL_PERIPH1 12 #define CLK_PLL_DE 13 #define CLK_AXI 15 #define CLK_AHB1 16 #define CLK_APB1 17 #define CLK_APB2 18 #define CLK_AHB2 19 #define CLK_BUS_SCR 66 #define CLK_USBPHY0 88 #define CLK_USBPHY1 89 #define CLK_USBPHY2 90 #define CLK_USBPHY3 91 #define CLK_USBOHCI0 92 #define CLK_USBOHCI1 93 #define CLK_USBOHCI2 94 #define CLK_USBOHCI3 95 #define CLK_DRAM 96 #define CLK_MBUS 113 static struct aw_ccung_reset h3_ccu_resets[] = { CCU_RESET(RST_USB_PHY0, 0xcc, 0) CCU_RESET(RST_USB_PHY1, 0xcc, 1) CCU_RESET(RST_USB_PHY2, 0xcc, 2) CCU_RESET(RST_USB_PHY3, 0xcc, 3) CCU_RESET(RST_MBUS, 0xfc, 31) CCU_RESET(RST_BUS_CE, 0x2c0, 5) 
CCU_RESET(RST_BUS_DMA, 0x2c0, 6) CCU_RESET(RST_BUS_MMC0, 0x2c0, 8) CCU_RESET(RST_BUS_MMC1, 0x2c0, 9) CCU_RESET(RST_BUS_MMC2, 0x2c0, 10) CCU_RESET(RST_BUS_NAND, 0x2c0, 13) CCU_RESET(RST_BUS_DRAM, 0x2c0, 14) CCU_RESET(RST_BUS_EMAC, 0x2c0, 17) CCU_RESET(RST_BUS_TS, 0x2c0, 18) CCU_RESET(RST_BUS_HSTIMER, 0x2c0, 19) CCU_RESET(RST_BUS_SPI0, 0x2c0, 20) CCU_RESET(RST_BUS_SPI1, 0x2c0, 21) CCU_RESET(RST_BUS_OTG, 0x2c0, 23) CCU_RESET(RST_BUS_EHCI0, 0x2c0, 24) CCU_RESET(RST_BUS_EHCI1, 0x2c0, 25) CCU_RESET(RST_BUS_EHCI2, 0x2c0, 26) CCU_RESET(RST_BUS_EHCI3, 0x2c0, 27) CCU_RESET(RST_BUS_OHCI0, 0x2c0, 28) CCU_RESET(RST_BUS_OHCI1, 0x2c0, 29) CCU_RESET(RST_BUS_OHCI2, 0x2c0, 30) CCU_RESET(RST_BUS_OHCI3, 0x2c0, 31) CCU_RESET(RST_BUS_VE, 0x2c4, 0) CCU_RESET(RST_BUS_TCON0, 0x2c4, 3) CCU_RESET(RST_BUS_TCON1, 0x2c4, 4) CCU_RESET(RST_BUS_DEINTERLACE, 0x2c4, 5) CCU_RESET(RST_BUS_CSI, 0x2c4, 8) CCU_RESET(RST_BUS_TVE, 0x2c4, 9) CCU_RESET(RST_BUS_HDMI0, 0x2c4, 10) CCU_RESET(RST_BUS_HDMI1, 0x2c4, 11) CCU_RESET(RST_BUS_DE, 0x2c4, 12) CCU_RESET(RST_BUS_GPU, 0x2c4, 20) CCU_RESET(RST_BUS_MSGBOX, 0x2c4, 21) CCU_RESET(RST_BUS_SPINLOCK, 0x2c4, 22) CCU_RESET(RST_BUS_DBG, 0x2c4, 31) CCU_RESET(RST_BUS_EPHY, 0x2c8, 2) CCU_RESET(RST_BUS_CODEC, 0x2d0, 0) CCU_RESET(RST_BUS_SPDIF, 0x2d0, 1) CCU_RESET(RST_BUS_THS, 0x2d0, 8) CCU_RESET(RST_BUS_I2S0, 0x2d0, 12) CCU_RESET(RST_BUS_I2S1, 0x2d0, 13) CCU_RESET(RST_BUS_I2S2, 0x2d0, 14) CCU_RESET(RST_BUS_I2C0, 0x2d8, 0) CCU_RESET(RST_BUS_I2C1, 0x2d8, 1) CCU_RESET(RST_BUS_I2C2, 0x2d8, 2) CCU_RESET(RST_BUS_UART0, 0x2d8, 16) CCU_RESET(RST_BUS_UART1, 0x2d8, 17) CCU_RESET(RST_BUS_UART2, 0x2d8, 18) CCU_RESET(RST_BUS_UART3, 0x2d8, 19) CCU_RESET(RST_BUS_SCR, 0x2d8, 20) }; static struct aw_ccung_gate h3_ccu_gates[] = { CCU_GATE(CLK_BUS_CE, "bus-ce", "ahb1", 0x60, 5) CCU_GATE(CLK_BUS_DMA, "bus-dma", "ahb1", 0x60, 6) CCU_GATE(CLK_BUS_MMC0, "bus-mmc0", "ahb1", 0x60, 8) CCU_GATE(CLK_BUS_MMC1, "bus-mmc1", "ahb1", 0x60, 9) CCU_GATE(CLK_BUS_MMC2, "bus-mmc2", "ahb1", 0x60, 10) 
CCU_GATE(CLK_BUS_NAND, "bus-nand", "ahb1", 0x60, 13) CCU_GATE(CLK_BUS_DRAM, "bus-dram", "ahb1", 0x60, 14) CCU_GATE(CLK_BUS_EMAC, "bus-emac", "ahb2", 0x60, 17) CCU_GATE(CLK_BUS_TS, "bus-ts", "ahb1", 0x60, 18) CCU_GATE(CLK_BUS_HSTIMER, "bus-hstimer", "ahb1", 0x60, 19) CCU_GATE(CLK_BUS_SPI0, "bus-spi0", "ahb1", 0x60, 20) CCU_GATE(CLK_BUS_SPI1, "bus-spi1", "ahb1", 0x60, 21) CCU_GATE(CLK_BUS_OTG, "bus-otg", "ahb1", 0x60, 23) CCU_GATE(CLK_BUS_EHCI0, "bus-ehci0", "ahb1", 0x60, 24) CCU_GATE(CLK_BUS_EHCI1, "bus-ehci1", "ahb2", 0x60, 25) CCU_GATE(CLK_BUS_EHCI2, "bus-ehci2", "ahb2", 0x60, 26) CCU_GATE(CLK_BUS_EHCI3, "bus-ehci3", "ahb2", 0x60, 27) CCU_GATE(CLK_BUS_OHCI0, "bus-ohci0", "ahb1", 0x60, 28) CCU_GATE(CLK_BUS_OHCI1, "bus-ohci1", "ahb2", 0x60, 29) CCU_GATE(CLK_BUS_OHCI2, "bus-ohci2", "ahb2", 0x60, 30) CCU_GATE(CLK_BUS_OHCI3, "bus-ohci3", "ahb2", 0x60, 31) CCU_GATE(CLK_BUS_VE, "bus-ve", "ahb1", 0x64, 0) CCU_GATE(CLK_BUS_TCON0, "bus-tcon0", "ahb1", 0x64, 3) CCU_GATE(CLK_BUS_TCON1, "bus-tcon1", "ahb1", 0x64, 4) CCU_GATE(CLK_BUS_DEINTERLACE, "bus-deinterlace", "ahb1", 0x64, 5) CCU_GATE(CLK_BUS_CSI, "bus-csi", "ahb1", 0x64, 8) CCU_GATE(CLK_BUS_TVE, "bus-tve", "ahb1", 0x64, 9) CCU_GATE(CLK_BUS_HDMI, "bus-hdmi", "ahb1", 0x64, 11) CCU_GATE(CLK_BUS_DE, "bus-de", "ahb1", 0x64, 12) CCU_GATE(CLK_BUS_GPU, "bus-gpu", "ahb1", 0x64, 20) CCU_GATE(CLK_BUS_MSGBOX, "bus-msgbox", "ahb1", 0x64, 21) CCU_GATE(CLK_BUS_SPINLOCK, "bus-spinlock", "ahb1", 0x64, 22) CCU_GATE(CLK_BUS_CODEC, "bus-codec", "apb1", 0x68, 0) CCU_GATE(CLK_BUS_SPDIF, "bus-spdif", "apb1", 0x68, 1) CCU_GATE(CLK_BUS_PIO, "bus-pio", "apb1", 0x68, 5) CCU_GATE(CLK_BUS_THS, "bus-ths", "apb1", 0x68, 8) CCU_GATE(CLK_BUS_I2S0, "bus-i2s0", "apb1", 0x68, 12) CCU_GATE(CLK_BUS_I2S1, "bus-i2s1", "apb1", 0x68, 13) CCU_GATE(CLK_BUS_I2S2, "bus-i2s2", "apb1", 0x68, 14) CCU_GATE(CLK_BUS_I2C0, "bus-i2c0", "apb2", 0x6c, 0) CCU_GATE(CLK_BUS_I2C1, "bus-i2c1", "apb2", 0x6c, 1) CCU_GATE(CLK_BUS_I2C2, "bus-i2c2", "apb2", 0x6c, 2) 
CCU_GATE(CLK_BUS_UART0, "bus-uart0", "apb2", 0x6c, 16) CCU_GATE(CLK_BUS_UART1, "bus-uart1", "apb2", 0x6c, 17) CCU_GATE(CLK_BUS_UART2, "bus-uart2", "apb2", 0x6c, 18) CCU_GATE(CLK_BUS_UART3, "bus-uart3", "apb2", 0x6c, 19) CCU_GATE(CLK_BUS_SCR, "bus-scr", "apb2", 0x6c, 20) CCU_GATE(CLK_BUS_EPHY, "bus-ephy", "ahb1", 0x70, 0) CCU_GATE(CLK_BUS_DBG, "bus-dbg", "ahb1", 0x70, 7) CCU_GATE(CLK_USBPHY0, "usb-phy0", "osc24M", 0xcc, 8) CCU_GATE(CLK_USBPHY1, "usb-phy1", "osc24M", 0xcc, 9) CCU_GATE(CLK_USBPHY2, "usb-phy2", "osc24M", 0xcc, 10) CCU_GATE(CLK_USBPHY3, "usb-phy3", "osc24M", 0xcc, 11) CCU_GATE(CLK_USBOHCI0, "usb-ohci0", "osc24M", 0xcc, 16) CCU_GATE(CLK_USBOHCI1, "usb-ohci1", "osc24M", 0xcc, 17) CCU_GATE(CLK_USBOHCI2, "usb-ohci2", "osc24M", 0xcc, 18) CCU_GATE(CLK_USBOHCI3, "usb-ohci3", "osc24M", 0xcc, 19) CCU_GATE(CLK_THS, "ths", "thsdiv", 0x74, 31) CCU_GATE(CLK_I2S0, "i2s0", "i2s0mux", 0xB0, 31) CCU_GATE(CLK_I2S1, "i2s1", "i2s1mux", 0xB4, 31) CCU_GATE(CLK_I2S2, "i2s2", "i2s2mux", 0xB8, 31) CCU_GATE(CLK_DRAM_VE, "dram-ve", "dram", 0x100, 0) CCU_GATE(CLK_DRAM_CSI, "dram-csi", "dram", 0x100, 1) CCU_GATE(CLK_DRAM_DEINTERLACE, "dram-deinterlace", "dram", 0x100, 2) CCU_GATE(CLK_DRAM_TS, "dram-ts", "dram", 0x100, 3) CCU_GATE(CLK_AC_DIG, "ac-dig", "pll_audio", 0x140, 31) CCU_GATE(CLK_AVS, "avs", "osc24M", 0x144, 31) CCU_GATE(CLK_CSI_MISC, "csi-misc", "osc24M", 0x130, 31) CCU_GATE(CLK_HDMI_DDC, "hdmi-ddc", "osc24M", 0x154, 31) }; static const char *pll_cpux_parents[] = {"osc24M"}; NKMP_CLK(pll_cpux_clk, CLK_PLL_CPUX, /* id */ "pll_cpux", pll_cpux_parents, /* name, parents */ 0x00, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* p factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK | AW_CLK_SCALE_CHANGE); /* flags */ static const char *pll_audio_parents[] = {"osc24M"}; NKMP_CLK(pll_audio_clk, CLK_PLL_AUDIO, /* id */ "pll_audio", pll_audio_parents, /* name, parents */ 
0x08, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* k factor (fake) */ 0, 5, 0, 0, /* m factor */ 16, 4, 0, 0, /* p factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_audio_mult_parents[] = {"pll_audio"}; FIXED_CLK(pll_audio_2x_clk, CLK_PLL_AUDIO_2X, /* id */ "pll_audio-2x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ FIXED_CLK(pll_audio_4x_clk, CLK_PLL_AUDIO_4X, /* id */ "pll_audio-4x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 4, /* mult */ 1, /* div */ 0); /* flags */ FIXED_CLK(pll_audio_8x_clk, CLK_PLL_AUDIO_8X, /* id */ "pll_audio-8x", /* name */ pll_audio_mult_parents, /* parent */ 0, /* freq */ 8, /* mult */ 1, /* div */ 0); /* flags */ static const char *pll_video_parents[] = {"osc24M"}; FRAC_CLK(pll_video_clk, CLK_PLL_VIDEO, /* id */ "pll_video", pll_video_parents, /* name, parents */ 0x10, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_ve_parents[] = {"osc24M"}; FRAC_CLK(pll_ve_clk, CLK_PLL_VE, /* id */ "pll_ve", pll_ve_parents, /* name, parents */ 0x18, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_ddr_parents[] = {"osc24M"}; NKMP_CLK_WITH_UPDATE(pll_ddr_clk, CLK_PLL_DDR, /* id */ "pll_ddr", pll_ddr_parents, /* name, parents */ 0x20, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 2, 0, 0, /* m factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ 
20, /* update */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_periph0_parents[] = {"osc24M"}; static const char *pll_periph0_2x_parents[] = {"pll_periph0"}; NKMP_CLK(pll_periph0_clk, CLK_PLL_PERIPH0, /* id */ "pll_periph0", pll_periph0_parents, /* name, parents */ 0x28, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 2, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ FIXED_CLK(pll_periph0_2x_clk, CLK_PLL_PERIPH0_2X, /* id */ "pll_periph0-2x", /* name */ pll_periph0_2x_parents, /* parent */ 0, /* freq */ 2, /* mult */ 1, /* div */ 0); /* flags */ static const char *pll_gpu_parents[] = {"osc24M"}; FRAC_CLK(pll_gpu_clk, CLK_PLL_GPU, /* id */ "pll_gpu", pll_gpu_parents, /* name, parents */ 0x38, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char *pll_periph1_parents[] = {"osc24M"}; NKMP_CLK(pll_periph1_clk, CLK_PLL_PERIPH1, /* id */ "pll_periph1", pll_periph1_parents, /* name, parents */ 0x44, /* offset */ 8, 5, 0, 0, /* n factor */ 4, 2, 0, 0, /* k factor */ 0, 0, 2, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* p factor (fake) */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_de_parents[] = {"osc24M"}; FRAC_CLK(pll_de_clk, CLK_PLL_DE, /* id */ "pll_de", pll_de_parents, /* name, parents */ 0x48, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 4, 0, 0, /* m factor */ 31, 28, 1000, /* gate, lock, lock retries */ AW_CLK_HAS_LOCK, /* flags */ 270000000, 297000000, /* freq0, freq1 */ 24, 25, /* mode sel, freq sel */ 192000000, 600000000); /* min freq, max freq */ static const char 
*cpux_parents[] = {"osc32k", "osc24M", "pll_cpux", "pll_cpux"}; MUX_CLK(cpux_clk, CLK_CPUX, /* id */ "cpux", cpux_parents, /* name, parents */ 0x50, 16, 2); /* offset, shift, width */ static const char *axi_parents[] = {"cpux"}; DIV_CLK(axi_clk, CLK_AXI, /* id */ "axi", axi_parents, /* name, parents */ 0x50, /* offset */ 0, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *ahb1_parents[] = {"osc32k", "osc24M", "axi", "pll_periph0"}; PREDIV_CLK(ahb1_clk, CLK_AHB1, /* id */ "ahb1", ahb1_parents, /* name, parents */ 0x54, /* offset */ 12, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 6, 2, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 12, 2, 3); /* prediv condition */ static const char *apb1_parents[] = {"ahb1"}; static struct clk_div_table apb1_div_table[] = { { .value = 0, .divider = 2, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 4, }, { .value = 3, .divider = 8, }, { }, }; DIV_CLK(apb1_clk, CLK_APB1, /* id */ "apb1", apb1_parents, /* name, parents */ 0x54, /* offset */ 8, 2, /* shift, width */ CLK_DIV_WITH_TABLE, /* flags */ apb1_div_table); /* div table */ static const char *apb2_parents[] = {"osc32k", "osc24M", "pll_periph0", "pll_periph0"}; NM_CLK(apb2_clk, CLK_APB2, /* id */ "apb2", apb2_parents, /* name, parents */ 0x58, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 5, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); static const char *ahb2_parents[] = {"ahb1", "pll_periph0"}; PREDIV_CLK(ahb2_clk, CLK_AHB2, /* id */ "ahb2", ahb2_parents, /* name, parents */ 0x5c, /* offset */ 0, 2, /* mux */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* div */ 0, 0, 2, AW_CLK_FACTOR_HAS_COND | AW_CLK_FACTOR_FIXED, /* prediv */ 0, 2, 1); /* prediv condition */ static const char *ths_parents[] = {"osc24M"}; static struct clk_div_table ths_div_table[] = { { .value = 0, .divider = 1, }, { .value = 1, .divider = 2, }, { .value = 2, .divider = 4, }, { .value = 3, .divider = 6, }, { }, }; DIV_CLK(thsdiv_clk, 
0, /* id */ "thsdiv", ths_parents, /* name, parents */ 0x74, /* offset */ 0, 2, /* shift, width */ CLK_DIV_WITH_TABLE, /* flags */ ths_div_table); /* div table */ static const char *mod_parents[] = {"osc24M", "pll_periph0", "pll_periph1"}; NM_CLK(nand_clk, CLK_NAND, "nand", mod_parents, /* id, name, parents */ 0x80, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(mmc0_clk, CLK_MMC0, "mmc0", mod_parents, /* id, name, parents */ 0x88, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc1_clk, CLK_MMC1, "mmc1", mod_parents, /* id, name, parents */ 0x8c, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc2_clk, CLK_MMC2, "mmc2", mod_parents, /* id, name, parents */ 0x90, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *ts_parents[] = {"osc24M", "pll_periph0"}; NM_CLK(ts_clk, CLK_TS, "ts", ts_parents, /* id, name, parents */ 0x98, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(ce_clk, CLK_CE, "ce", mod_parents, /* id, name, parents */ 0x9C, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX); /* flags */ NM_CLK(spi0_clk, CLK_SPI0, "spi0", mod_parents, /* id, name, parents */ 0xA0, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, 
/* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(spi1_clk, CLK_SPI1, "spi1", mod_parents, /* id, name, parents */ 0xA4, /* offset */ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *i2s_parents[] = {"pll_audio-8x", "pll_audio-4x", "pll_audio-2x", "pll_audio"}; MUX_CLK(i2s0mux_clk, 0, "i2s0mux", i2s_parents, /* id, name, parents */ 0xb0, 16, 2); /* offset, mux shift, mux width */ MUX_CLK(i2s1mux_clk, 0, "i2s1mux", i2s_parents, /* id, name, parents */ 0xb4, 16, 2); /* offset, mux shift, mux width */ MUX_CLK(i2s2mux_clk, 0, "i2s2mux", i2s_parents, /* id, name, parents */ 0xb8, 16, 2); /* offset, mux shift, mux width */ static const char *spdif_parents[] = {"pll_audio"}; NM_CLK(spdif_clk, CLK_SPDIF, "spdif", spdif_parents, /* id, name, parents */ 0xC0, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake); */ 0, 4, 0, 0, /* m factor */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static const char *dram_parents[] = {"pll_ddr", "pll_periph0-2x"}; NM_CLK(dram_clk, CLK_DRAM, "dram", dram_parents, /* id, name, parents */ 0xF4, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 20, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX); /* flags */ static const char *de_parents[] = {"pll_periph0-2x", "pll_de"}; NM_CLK(de_clk, CLK_DE, "de", de_parents, /* id, name, parents */ 0x104, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *tcon0_parents[] = {"pll_video"}; NM_CLK(tcon0_clk, CLK_TCON0, "tcon0", tcon0_parents, /* id, name, parents */ 0x118, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ 
AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *tve_parents[] = {"pll_de", "pll_periph1"}; NM_CLK(tve_clk, CLK_TVE, "tve", tve_parents, /* id, name, parents */ 0x120, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *deinterlace_parents[] = {"pll_periph0", "pll_periph1"}; NM_CLK(deinterlace_clk, CLK_DEINTERLACE, "deinterlace", deinterlace_parents, /* id, name, parents */ 0x124, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *csi_sclk_parents[] = {"pll_periph0", "pll_periph1"}; NM_CLK(csi_sclk_clk, CLK_CSI_SCLK, "csi-sclk", csi_sclk_parents, /* id, name, parents */ 0x134, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 16, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *csi_mclk_parents[] = {"osc24M", "pll_video", "pll_periph1"}; NM_CLK(csi_mclk_clk, CLK_CSI_MCLK, "csi-mclk", csi_mclk_parents, /* id, name, parents */ 0x134, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 8, 2, /* mux */ 15, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *ve_parents[] = {"pll_ve"}; NM_CLK(ve_clk, CLK_VE, "ve", ve_parents, /* id, name, parents */ 0x13C, /* offset */ 16, 3, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static const char *hdmi_parents[] = {"pll_video"}; NM_CLK(hdmi_clk, CLK_HDMI, "hdmi", hdmi_parents, /* id, name, parents */ 0x150, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char 
*mbus_parents[] = {"osc24M", "pll_periph0-2x", "pll_ddr"}; NM_CLK(mbus_clk, CLK_MBUS, "mbus", mbus_parents, /* id, name, parents */ 0x15C, /* offset */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* n factor (fake) */ 0, 3, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_HAS_GATE); /* flags */ static const char *gpu_parents[] = {"pll_gpu"}; NM_CLK(gpu_clk, CLK_GPU, "gpu", gpu_parents, /* id, name, parents */ 0x1A0, /* offset */ 0, 2, 0, 0, /* n factor */ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor (fake) */ 0, 0, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE); /* flags */ static struct aw_ccung_clk h3_ccu_clks[] = { { .type = AW_CLK_NKMP, .clk.nkmp = &pll_cpux_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_audio_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph0_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_periph1_clk}, { .type = AW_CLK_NKMP, .clk.nkmp = &pll_ddr_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_video_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_ve_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_gpu_clk}, { .type = AW_CLK_FRAC, .clk.frac = &pll_de_clk}, { .type = AW_CLK_NM, .clk.nm = &apb2_clk}, { .type = AW_CLK_NM, .clk.nm = &nand_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_NM, .clk.nm = &ts_clk}, { .type = AW_CLK_NM, .clk.nm = &ce_clk}, { .type = AW_CLK_NM, .clk.nm = &spi0_clk}, { .type = AW_CLK_NM, .clk.nm = &spi1_clk}, { .type = AW_CLK_NM, .clk.nm = &spdif_clk}, { .type = AW_CLK_NM, .clk.nm = &dram_clk}, { .type = AW_CLK_NM, .clk.nm = &de_clk}, { .type = AW_CLK_NM, .clk.nm = &tcon0_clk}, { .type = AW_CLK_NM, .clk.nm = &tve_clk}, { .type = AW_CLK_NM, .clk.nm = &deinterlace_clk}, { .type = AW_CLK_NM, .clk.nm = &csi_sclk_clk}, { .type = AW_CLK_NM, .clk.nm = &csi_mclk_clk}, { .type = AW_CLK_NM, .clk.nm = &ve_clk}, { .type = AW_CLK_NM, .clk.nm = &hdmi_clk}, { .type = AW_CLK_NM, .clk.nm = &mbus_clk}, { .type = AW_CLK_NM, .clk.nm = 
&gpu_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ahb2_clk}, { .type = AW_CLK_MUX, .clk.mux = &cpux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s0mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s1mux_clk}, { .type = AW_CLK_MUX, .clk.mux = &i2s2mux_clk}, { .type = AW_CLK_DIV, .clk.div = &axi_clk}, { .type = AW_CLK_DIV, .clk.div = &apb1_clk}, { .type = AW_CLK_DIV, .clk.div = &thsdiv_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph0_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_4x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio_8x_clk}, }; static struct aw_clk_init h3_init_clks[] = { {"ahb1", "pll_periph0", 0, false}, {"ahb2", "pll_periph0", 0, false}, {"dram", "pll_ddr", 0, false}, }; static struct ofw_compat_data compat_data[] = { #if defined(SOC_ALLWINNER_H3) { "allwinner,sun8i-h3-ccu", 1 }, #endif #if defined(SOC_ALLWINNER_H5) { "allwinner,sun50i-h5-ccu", 1 }, #endif { NULL, 0}, }; static int ccu_h3_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner H3/H5 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_h3_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = h3_ccu_resets; sc->nresets = nitems(h3_ccu_resets); sc->gates = h3_ccu_gates; sc->ngates = nitems(h3_ccu_gates); sc->clks = h3_ccu_clks; sc->nclks = nitems(h3_ccu_clks); sc->clk_init = h3_init_clks; sc->n_clk_init = nitems(h3_init_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_h3ng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_h3_probe), DEVMETHOD(device_attach, ccu_h3_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_h3ng, ccu_h3ng_driver, ccu_h3ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_h3ng, simplebus, 
ccu_h3ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_h6.c b/sys/dev/clk/allwinner/ccu_h6.c index 0a378d9c3980..f172d85ab07e 100644 --- a/sys/dev/clk/allwinner/ccu_h6.c +++ b/sys/dev/clk/allwinner/ccu_h6.c @@ -1,494 +1,494 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_OSC_12M 0 #define CLK_PLL_CPUX 1 #define CLK_PLL_DDR0 2 #define CLK_PLL_PERIPH0_2X 4 #define CLK_PLL_PERIPH0_4X 5 #define CLK_PLL_PERIPH1 6 #define CLK_PLL_PERIPH1_2X 7 #define CLK_PLL_PERIPH1_4X 8 #define CLK_PLL_GPU 9 #define CLK_PLL_VIDEO0 10 #define CLK_PLL_VIDEO0_4X 11 #define CLK_PLL_VIDEO1 12 #define CLK_PLL_VIDEO1_4X 13 #define CLK_PLL_VE 14 #define CLK_PLL_DE 14 #define CLK_PLL_HSIC 16 #define CLK_PSI_AHB1_AHB2 24 #define CLK_AHB3 25 #define CLK_APB2 27 static struct aw_ccung_reset h6_ccu_resets[] = { /* PSI_BGR_REG */ CCU_RESET(RST_BUS_PSI, 0x79c, 16) /* SMHC_BGR_REG */ CCU_RESET(RST_BUS_MMC0, 0x84c, 16) CCU_RESET(RST_BUS_MMC1, 0x84c, 17) CCU_RESET(RST_BUS_MMC2, 0x84c, 18) /* UART_BGR_REG */ CCU_RESET(RST_BUS_UART0, 0x90c, 16) CCU_RESET(RST_BUS_UART1, 0x90c, 17) CCU_RESET(RST_BUS_UART2, 0x90c, 18) CCU_RESET(RST_BUS_UART3, 0x90c, 19) /* TWI_BGR_REG */ CCU_RESET(RST_BUS_I2C0, 0x91c, 16) CCU_RESET(RST_BUS_I2C1, 0x91c, 17) CCU_RESET(RST_BUS_I2C2, 0x91c, 18) CCU_RESET(RST_BUS_I2C3, 0x91c, 19) /* EMAC_BGR_REG */ CCU_RESET(RST_BUS_EMAC, 0x97c, 16) /* USB0_CLK_REG */ CCU_RESET(RST_USB_PHY0, 0xa70, 30) /* USB1_CLK_REG */ CCU_RESET(RST_USB_PHY1, 0xa74, 30) /* USB3_CLK_REG */ CCU_RESET(RST_USB_HSIC, 0xa7c, 28) CCU_RESET(RST_USB_PHY3, 0xa7c, 30) /* USB_BGR_REG */ CCU_RESET(RST_BUS_OHCI0, 0xa8c, 16) CCU_RESET(RST_BUS_OHCI3, 0xa8c, 19) CCU_RESET(RST_BUS_EHCI0, 0xa8c, 20) CCU_RESET(RST_BUS_XHCI, 0xa8c, 21) CCU_RESET(RST_BUS_EHCI3, 0xa8c, 23) CCU_RESET(RST_BUS_OTG, 0xa8c, 24) }; static struct aw_ccung_gate h6_ccu_gates[] = { /* PSI_BGR_REG */ CCU_GATE(CLK_BUS_PSI, "bus-psi", "psi_ahb1_ahb2", 0x79c, 0) /* SMHC_BGR_REG */ CCU_GATE(CLK_BUS_MMC0, "bus-mmc0", "ahb3", 0x84c, 0) CCU_GATE(CLK_BUS_MMC1, "bus-mmc1", "ahb3", 0x84c, 1) CCU_GATE(CLK_BUS_MMC2, 
"bus-mmc2", "ahb3", 0x84c, 2) /* UART_BGR_REG Enabling the gate enables weird behavior ... */ /* CCU_GATE(CLK_BUS_UART0, "bus-uart0", "apb2", 0x90c, 0) */ /* CCU_GATE(CLK_BUS_UART1, "bus-uart1", "apb2", 0x90c, 1) */ /* CCU_GATE(CLK_BUS_UART2, "bus-uart2", "apb2", 0x90c, 2) */ /* CCU_GATE(CLK_BUS_UART3, "bus-uart3", "apb2", 0x90c, 3) */ /* TWI_BGR_REG */ CCU_GATE(CLK_BUS_I2C0, "bus-i2c0", "apb2", 0x91c, 0) CCU_GATE(CLK_BUS_I2C1, "bus-i2c1", "apb2", 0x91c, 1) CCU_GATE(CLK_BUS_I2C2, "bus-i2c2", "apb2", 0x91c, 2) CCU_GATE(CLK_BUS_I2C3, "bus-i2c3", "apb2", 0x91c, 3) /* EMAC_BGR_REG */ CCU_GATE(CLK_BUS_EMAC, "bus-emac", "ahb3", 0x97c, 0) /* USB0_CLK_REG */ CCU_GATE(CLK_USB_PHY0, "usb-phy0", "ahb3", 0xa70, 29) CCU_GATE(CLK_USB_OHCI0, "usb-ohci0", "ahb3", 0xa70, 31) /* USB1_CLK_REG */ CCU_GATE(CLK_USB_PHY1, "usb-phy1", "ahb3", 0xa74, 29) /* USB3_CLK_REG */ CCU_GATE(CLK_USB_HSIC, "usb-hsic", "ahb3", 0xa7c, 26) CCU_GATE(CLK_USB_HSIC_12M, "usb-hsic-12M", "ahb3", 0xa7c, 27) CCU_GATE(CLK_USB_PHY3, "usb-phy3", "ahb3", 0xa7c, 29) CCU_GATE(CLK_USB_OHCI3, "usb-ohci3", "ahb3", 0xa7c, 31) /* USB_BGR_REG */ CCU_GATE(CLK_BUS_OHCI0, "bus-ohci0", "ahb3", 0xa8c, 0) CCU_GATE(CLK_BUS_OHCI3, "bus-ohci3", "ahb3", 0xa8c, 3) CCU_GATE(CLK_BUS_EHCI0, "bus-ehci0", "ahb3", 0xa8c, 4) CCU_GATE(CLK_BUS_XHCI, "bus-xhci", "ahb3", 0xa8c, 5) CCU_GATE(CLK_BUS_EHCI3, "bus-ehci3", "ahb3", 0xa8c, 7) CCU_GATE(CLK_BUS_OTG, "bus-otg", "ahb3", 0xa8c, 8) }; static const char *osc12m_parents[] = {"osc24M"}; FIXED_CLK(osc12m_clk, CLK_OSC_12M, /* id */ "osc12M", /* name */ osc12m_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_cpux_parents[] = {"osc24M"}; NP_CLK(pll_cpux_clk, CLK_PLL_CPUX, /* id */ "pll_cpux", pll_cpux_parents, /* name, parents */ 0x00, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 2, 0, 0, /* p factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_ddr0_parents[] = {"osc24M"};
NMM_CLK(pll_ddr0_clk, CLK_PLL_DDR0, /* id */ "pll_ddr0", pll_ddr0_parents, /* name, parents */ 0x10, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_peri0_4x_parents[] = {"osc24M"}; NMM_CLK(pll_peri0_4x_clk, CLK_PLL_PERIPH0_4X, /* id */ "pll_periph0_4x", pll_peri0_4x_parents, /* name, parents */ 0x20, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_peri0_2x_parents[] = {"pll_periph0_4x"}; FIXED_CLK(pll_peri0_2x_clk, CLK_PLL_PERIPH0_2X, /* id */ "pll_periph0_2x", /* name */ pll_peri0_2x_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_peri0_parents[] = {"pll_periph0_4x"}; FIXED_CLK(pll_peri0_clk, CLK_PLL_PERIPH0, /* id */ "pll_periph0", /* name */ pll_peri0_parents, /* parent */ 0, /* freq */ 1, /* mult */ 4, /* div */ 0); /* flags */ static const char *pll_peri1_4x_parents[] = {"osc24M"}; NMM_CLK(pll_peri1_4x_clk, CLK_PLL_PERIPH1_4X, /* id */ "pll_periph1_4x", pll_peri1_4x_parents, /* name, parents */ 0x28, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_peri1_2x_parents[] = {"pll_periph1_4x"}; FIXED_CLK(pll_peri1_2x_clk, CLK_PLL_PERIPH1_2X, /* id */ "pll_periph1_2x", /* name */ pll_peri1_2x_parents, /* parent */ 0, /* freq */ 1, /* mult */ 2, /* div */ 0); /* flags */ static const char *pll_peri1_parents[] = {"pll_periph1_4x"}; FIXED_CLK(pll_peri1_clk, CLK_PLL_PERIPH1, /* id */ "pll_periph1", /* name */ pll_peri1_parents, /* parent */ 0, /* freq */ 1, /* mult */ 4, /* div */ 0); /* flags */ static const char *pll_gpu_parents[] = {"osc24M"}; 
NMM_CLK(pll_gpu_clk, CLK_PLL_GPU, /* id */ "pll_gpu", pll_gpu_parents, /* name, parents */ 0x30, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_video0_4x_parents[] = {"osc24M"}; NMM_CLK(pll_video0_4x_clk, CLK_PLL_VIDEO0_4X, /* id */ "pll_video0_4x", pll_video0_4x_parents, /* name, parents */ 0x40, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_video0_parents[] = {"pll_video0_4x"}; FIXED_CLK(pll_video0_clk, CLK_PLL_VIDEO0, /* id */ "pll_video0", /* name */ pll_video0_parents, /* parent */ 0, /* freq */ 1, /* mult */ 4, /* div */ 0); /* flags */ static const char *pll_video1_4x_parents[] = {"osc24M"}; NMM_CLK(pll_video1_4x_clk, CLK_PLL_VIDEO1_4X, /* id */ "pll_video1_4x", pll_video1_4x_parents, /* name, parents */ 0x48, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_video1_parents[] = {"pll_video1_4x"}; FIXED_CLK(pll_video1_clk, CLK_PLL_VIDEO1, /* id */ "pll_video1", /* name */ pll_video1_parents, /* parent */ 0, /* freq */ 1, /* mult */ 4, /* div */ 0); /* flags */ static const char *pll_ve_parents[] = {"osc24M"}; NMM_CLK(pll_ve_clk, CLK_PLL_VE, /* id */ "pll_ve", pll_ve_parents, /* name, parents */ 0x58, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_de_parents[] = {"osc24M"}; NMM_CLK(pll_de_clk, CLK_PLL_DE, /* id */ "pll_de", pll_de_parents, /* name, parents */ 0x60, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 
factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ static const char *pll_hsic_parents[] = {"osc24M"}; NMM_CLK(pll_hsic_clk, CLK_PLL_HSIC, /* id */ "pll_hsic", pll_hsic_parents, /* name, parents */ 0x70, /* offset */ 8, 7, 0, 0, /* n factor */ 0, 1, 0, 0, /* m0 factor */ 1, 1, 0, 0, /* m1 factor */ 31, /* gate */ 28, 1000, /* lock */ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */ /* PLL_AUDIO missing */ /* CPUX_AXI missing */ static const char *psi_ahb1_ahb2_parents[] = {"osc24M", "osc32k", "iosc", "pll_periph0"}; NM_CLK(psi_ahb1_ahb2_clk, CLK_PSI_AHB1_AHB2, "psi_ahb1_ahb2", psi_ahb1_ahb2_parents, /* id, name, parents */ 0x510, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 2, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *ahb3_parents[] = {"osc24M", "osc32k", "psi_ahb1_ahb2", "pll_periph0"}; NM_CLK(ahb3_clk, CLK_AHB3, "ahb3", ahb3_parents, /* id, name, parents */ 0x51C, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 2, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *apb1_parents[] = {"osc24M", "osc32k", "psi_ahb1_ahb2", "pll_periph0"}; NM_CLK(apb1_clk, CLK_APB1, "apb1", apb1_parents, /* id, name, parents */ 0x520, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 2, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static const char *apb2_parents[] = {"osc24M", "osc32k", "psi_ahb1_ahb2", "pll_periph0"}; NM_CLK(apb2_clk, CLK_APB2, "apb2", apb2_parents, /* id, name, parents */ 0x524, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 2, 0, 0, /* m factor */ 24, 2, /* mux */ 0, /* gate */ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ /* Missing MBUS clock */ static const char *mod_parents[] = {"osc24M", "pll_periph0_2x", "pll_periph1_2x"}; NM_CLK(mmc0_clk, 
CLK_MMC0, "mmc0", mod_parents, /* id, name, parents */ 0x830, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc1_clk, CLK_MMC1, "mmc1", mod_parents, /* id, name, parents */ 0x834, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ NM_CLK(mmc2_clk, CLK_MMC2, "mmc2", mod_parents, /* id, name, parents */ 0x838, /* offset */ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */ 0, 4, 0, 0, /* m factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */ static struct aw_ccung_clk h6_ccu_clks[] = { { .type = AW_CLK_NP, .clk.np = &pll_cpux_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_ddr0_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_peri0_4x_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_peri1_4x_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_gpu_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_video0_4x_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_video1_4x_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_ve_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_de_clk}, { .type = AW_CLK_NMM, .clk.nmm = &pll_hsic_clk}, { .type = AW_CLK_NM, .clk.nm = &psi_ahb1_ahb2_clk}, { .type = AW_CLK_NM, .clk.nm = &ahb3_clk}, { .type = AW_CLK_NM, .clk.nm = &apb1_clk}, { .type = AW_CLK_NM, .clk.nm = &apb2_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc0_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc1_clk}, { .type = AW_CLK_NM, .clk.nm = &mmc2_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &osc12m_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_peri0_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_peri0_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_peri1_2x_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_peri1_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &pll_video0_clk}, { .type = AW_CLK_FIXED, 
.clk.fixed = &pll_video1_clk}, }; static int ccu_h6_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun50i-h6-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner H6 Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_h6_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = h6_ccu_resets; sc->nresets = nitems(h6_ccu_resets); sc->gates = h6_ccu_gates; sc->ngates = nitems(h6_ccu_gates); sc->clks = h6_ccu_clks; sc->nclks = nitems(h6_ccu_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_h6ng_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_h6_probe), DEVMETHOD(device_attach, ccu_h6_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_h6ng, ccu_h6ng_driver, ccu_h6ng_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_h6ng, simplebus, ccu_h6ng_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_h6_r.c b/sys/dev/clk/allwinner/ccu_h6_r.c index 7d87b3082682..4ca5e86cc436 100644 --- a/sys/dev/clk/allwinner/ccu_h6_r.c +++ b/sys/dev/clk/allwinner/ccu_h6_r.c @@ -1,165 +1,165 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_R_AHB 1 #define CLK_R_APB2 3 static struct aw_ccung_reset ccu_sun50i_h6_r_resets[] = { CCU_RESET(RST_R_APB1_TIMER, 0x11c, 16) CCU_RESET(RST_R_APB1_TWD, 0x12c, 16) CCU_RESET(RST_R_APB1_PWM, 0x13c, 16) CCU_RESET(RST_R_APB2_UART, 0x18c, 16) CCU_RESET(RST_R_APB2_I2C, 0x19c, 16) CCU_RESET(RST_R_APB1_IR, 0x1cc, 16) CCU_RESET(RST_R_APB1_W1, 0x1ec, 16) }; static struct aw_ccung_gate ccu_sun50i_h6_r_gates[] = { CCU_GATE(CLK_R_APB1_TIMER, "r_apb1-timer", "r_apb1", 0x11c, 0) CCU_GATE(CLK_R_APB1_TWD, "r_apb1-twd", "r_apb1", 0x12c, 0) CCU_GATE(CLK_R_APB1_PWM, "r_apb1-pwm", "r_apb1", 0x13c, 0) CCU_GATE(CLK_R_APB2_UART, "r_apb1-uart", "r_apb2", 0x18c, 0) CCU_GATE(CLK_R_APB2_I2C, "r_apb1-i2c", "r_apb2", 0x19c, 0) CCU_GATE(CLK_R_APB1_IR, "r_apb1-ir", "r_apb1", 0x1cc, 0) CCU_GATE(CLK_R_APB1_W1, "r_apb1-w1", "r_apb1", 0x1ec, 0) }; static const char *ar100_parents[] = {"osc24M", "osc32k", "pll_periph0", "iosc"}; PREDIV_CLK(ar100_clk, CLK_AR100, /* id */ "ar100", ar100_parents, /* name, parents */ 0x00, /* offset */ 16, 2, /* mux */ 4, 2, 0, 
AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 8, 5, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 16, 2, 2); /* prediv condition */ static const char *r_ahb_parents[] = {"ar100"}; FIXED_CLK(r_ahb_clk, CLK_R_AHB, /* id */ "r_ahb", /* name */ r_ahb_parents, /* parent */ 0, /* freq */ 1, /* mult */ 1, /* div */ 0); /* flags */ static const char *r_apb1_parents[] = {"r_ahb"}; DIV_CLK(r_apb1_clk, CLK_R_APB1, /* id */ "r_apb1", r_apb1_parents, /* name, parents */ 0x0c, /* offset */ 0, 2, /* shift, width */ 0, NULL); /* flags, div table */ static const char *r_apb2_parents[] = {"osc24M", "osc32k", "pll_periph0", "iosc"}; PREDIV_CLK(r_apb2_clk, CLK_R_APB2, /* id */ "r_apb2", r_apb2_parents, /* name, parents */ 0x10, /* offset */ 16, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 8, 5, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 16, 2, 2); /* prediv condition */ static struct aw_ccung_clk clks[] = { { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ar100_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &r_ahb_clk}, { .type = AW_CLK_DIV, .clk.div = &r_apb1_clk}, { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &r_apb2_clk}, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun50i-h6-r-ccu", 1 }, { NULL, 0}, }; static int ccu_sun50i_h6_r_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner SUN50I_H6_R Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_sun50i_h6_r_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = ccu_sun50i_h6_r_resets; sc->nresets = nitems(ccu_sun50i_h6_r_resets); sc->gates = ccu_sun50i_h6_r_gates; sc->ngates = nitems(ccu_sun50i_h6_r_gates); sc->clks = clks; sc->nclks = nitems(clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_sun50i_h6_r_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_sun50i_h6_r_probe), DEVMETHOD(device_attach, 
ccu_sun50i_h6_r_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_sun50i_h6_r, ccu_sun50i_h6_r_driver, ccu_sun50i_h6_r_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_sun50i_h6_r, simplebus, ccu_sun50i_h6_r_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/allwinner/ccu_sun8i_r.c b/sys/dev/clk/allwinner/ccu_sun8i_r.c index e970dd1de95c..0fdbe6a10ec2 100644 --- a/sys/dev/clk/allwinner/ccu_sun8i_r.c +++ b/sys/dev/clk/allwinner/ccu_sun8i_r.c @@ -1,256 +1,256 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017,2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include "opt_soc.h" #endif -#include -#include -#include +#include +#include +#include #include #include #include /* Non-exported clocks */ #define CLK_AHB0 1 #define CLK_APB0 2 static struct aw_ccung_reset ccu_sun8i_r_resets[] = { CCU_RESET(RST_APB0_IR, 0xb0, 1) CCU_RESET(RST_APB0_TIMER, 0xb0, 2) CCU_RESET(RST_APB0_RSB, 0xb0, 3) CCU_RESET(RST_APB0_UART, 0xb0, 4) CCU_RESET(RST_APB0_I2C, 0xb0, 6) }; static struct aw_ccung_gate ccu_sun8i_r_gates[] = { CCU_GATE(CLK_APB0_PIO, "apb0-pio", "apb0", 0x28, 0) CCU_GATE(CLK_APB0_IR, "apb0-ir", "apb0", 0x28, 1) CCU_GATE(CLK_APB0_TIMER, "apb0-timer", "apb0", 0x28, 2) CCU_GATE(CLK_APB0_RSB, "apb0-rsb", "apb0", 0x28, 3) CCU_GATE(CLK_APB0_UART, "apb0-uart", "apb0", 0x28, 4) CCU_GATE(CLK_APB0_I2C, "apb0-i2c", "apb0", 0x28, 6) CCU_GATE(CLK_APB0_TWD, "apb0-twd", "apb0", 0x28, 7) }; static const char *ar100_parents[] = {"osc32k", "osc24M", "pll_periph0", "iosc"}; static const char *a83t_ar100_parents[] = {"osc16M-d512", "osc24M", "pll_periph", "osc16M"}; PREDIV_CLK(ar100_clk, CLK_AR100, /* id */ "ar100", ar100_parents, /* name, parents */ 0x00, /* offset */ 16, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 8, 5, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 16, 2, 2); /* prediv condition */ PREDIV_CLK(a83t_ar100_clk, CLK_AR100, /* id */ "ar100", a83t_ar100_parents, /* name, parents */ 0x00, /* offset */ 16, 2, /* mux */ 4, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* div */ 8, 5, 0, AW_CLK_FACTOR_HAS_COND, /* prediv */ 16, 2, 2); /* prediv condition */ static const char *ahb0_parents[] = {"ar100"}; FIXED_CLK(ahb0_clk, CLK_AHB0, /* id */ "ahb0", /* name */ ahb0_parents, /* parent */ 0, /* freq */ 1, /* mult */ 1, /* div */ 0); /* flags */ static const char *apb0_parents[] = {"ahb0"}; DIV_CLK(apb0_clk, CLK_APB0, /* id */ "apb0", apb0_parents, /* name, parents */ 0x0c, /* offset */ 0, 2, /* shift, width */ 0, NULL); /* 
flags, div table */ static const char *r_ccu_ir_parents[] = {"osc32k", "osc24M"}; NM_CLK(r_ccu_ir_clk, CLK_IR, /* id */ "ir", r_ccu_ir_parents, /* names, parents */ 0x54, /* offset */ 0, 4, 0, 0, /* N factor */ 16, 2, 0, 0, /* M factor */ 24, 2, /* mux */ 31, /* gate */ AW_CLK_HAS_MUX | AW_CLK_REPARENT | AW_CLK_HAS_GATE);/* flags */ static const char *a83t_ir_parents[] = {"osc16M", "osc24M"}; static struct aw_clk_nm_def a83t_ir_clk = { .clkdef = { .id = CLK_IR, .name = "ir", .parent_names = a83t_ir_parents, .parent_cnt = nitems(a83t_ir_parents), }, .offset = 0x54, .n = {.shift = 0, .width = 4, .flags = AW_CLK_FACTOR_POWER_OF_TWO, }, .m = {.shift = 16, .width = 2}, .prediv = { .cond_shift = 24, .cond_width = 2, .cond_value = 0, .value = 16 }, .mux_shift = 24, .mux_width = 2, .flags = AW_CLK_HAS_MUX | AW_CLK_HAS_PREDIV, }; static struct aw_ccung_clk clks[] = { { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &ar100_clk}, { .type = AW_CLK_DIV, .clk.div = &apb0_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &ahb0_clk}, { .type = AW_CLK_NM, .clk.nm = &r_ccu_ir_clk}, }; static struct aw_ccung_clk a83t_clks[] = { { .type = AW_CLK_PREDIV_MUX, .clk.prediv_mux = &a83t_ar100_clk}, { .type = AW_CLK_DIV, .clk.div = &apb0_clk}, { .type = AW_CLK_FIXED, .clk.fixed = &ahb0_clk}, { .type = AW_CLK_NM, .clk.nm = &a83t_ir_clk}, }; static struct ofw_compat_data compat_data[] = { #if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5) { "allwinner,sun8i-h3-r-ccu", 1 }, #endif #if defined(SOC_ALLWINNER_A64) { "allwinner,sun50i-a64-r-ccu", 1 }, #endif { NULL, 0}, }; static int ccu_sun8i_r_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner SUN8I_R Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_sun8i_r_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = ccu_sun8i_r_resets; sc->nresets = 
nitems(ccu_sun8i_r_resets); sc->gates = ccu_sun8i_r_gates; sc->ngates = nitems(ccu_sun8i_r_gates); sc->clks = clks; sc->nclks = nitems(clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_sun8i_r_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_sun8i_r_probe), DEVMETHOD(device_attach, ccu_sun8i_r_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_sun8i_r, ccu_sun8i_r_driver, ccu_sun8i_r_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_sun8i_r, simplebus, ccu_sun8i_r_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); static int ccu_a83t_r_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun8i-a83t-r-ccu")) return (ENXIO); device_set_desc(dev, "Allwinner A83T_R Clock Control Unit NG"); return (BUS_PROBE_DEFAULT); } static int ccu_a83t_r_attach(device_t dev) { struct aw_ccung_softc *sc; sc = device_get_softc(dev); sc->resets = ccu_sun8i_r_resets; sc->nresets = nitems(ccu_sun8i_r_resets); sc->gates = ccu_sun8i_r_gates; sc->ngates = nitems(ccu_sun8i_r_gates); sc->clks = a83t_clks; sc->nclks = nitems(a83t_clks); return (aw_ccung_attach(dev)); } static device_method_t ccu_a83t_r_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ccu_a83t_r_probe), DEVMETHOD(device_attach, ccu_a83t_r_attach), DEVMETHOD_END }; DEFINE_CLASS_1(ccu_a83t_r, ccu_a83t_r_driver, ccu_a83t_r_methods, sizeof(struct aw_ccung_softc), aw_ccung_driver); EARLY_DRIVER_MODULE(ccu_a83t_r, simplebus, ccu_a83t_r_driver, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/extres/clk/clk.c b/sys/dev/clk/clk.c similarity index 99% rename from sys/dev/extres/clk/clk.c rename to sys/dev/clk/clk.c index c569b05b6189..52015d4e2905 100644 --- a/sys/dev/extres/clk/clk.c +++ b/sys/dev/clk/clk.c @@ -1,1708 +1,1708 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif -#include +#include SYSCTL_NODE(_hw, OID_AUTO, clock, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Clocks"); MALLOC_DEFINE(M_CLOCK, "clocks", "Clock framework"); /* Forward declarations. */ struct clk; struct clknodenode; struct clkdom; typedef TAILQ_HEAD(clknode_list, clknode) clknode_list_t; typedef TAILQ_HEAD(clkdom_list, clkdom) clkdom_list_t; /* Default clock methods. 
*/ static int clknode_method_init(struct clknode *clk, device_t dev); static int clknode_method_recalc_freq(struct clknode *clk, uint64_t *freq); static int clknode_method_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop); static int clknode_method_set_gate(struct clknode *clk, bool enable); static int clknode_method_set_mux(struct clknode *clk, int idx); /* * Clock controller methods. */ static clknode_method_t clknode_methods[] = { CLKNODEMETHOD(clknode_init, clknode_method_init), CLKNODEMETHOD(clknode_recalc_freq, clknode_method_recalc_freq), CLKNODEMETHOD(clknode_set_freq, clknode_method_set_freq), CLKNODEMETHOD(clknode_set_gate, clknode_method_set_gate), CLKNODEMETHOD(clknode_set_mux, clknode_method_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_0(clknode, clknode_class, clknode_methods, 0); /* * Clock node - basic element for modeling SOC clock graph. It holds the clock * provider's data about the clock, and the links for the clock's membership in * various lists. */ struct clknode { KOBJ_FIELDS; /* Clock nodes topology. */ struct clkdom *clkdom; /* Owning clock domain */ TAILQ_ENTRY(clknode) clkdom_link; /* Domain list entry */ TAILQ_ENTRY(clknode) clklist_link; /* Global list entry */ /* String based parent list. */ const char **parent_names; /* Array of parent names */ int parent_cnt; /* Number of parents */ int parent_idx; /* Parent index or -1 */ /* Cache for already resolved names. */ struct clknode **parents; /* Array of potential parents */ struct clknode *parent; /* Current parent */ /* Parent/child relationship links. */ clknode_list_t children; /* List of our children */ TAILQ_ENTRY(clknode) sibling_link; /* Our entry in parent's list */ /* Details of this device. 
*/ void *softc; /* Instance softc */ const char *name; /* Globally unique name */ intptr_t id; /* Per domain unique id */ int flags; /* CLK_FLAG_* */ struct sx lock; /* Lock for this clock */ int ref_cnt; /* Reference counter */ int enable_cnt; /* Enabled counter */ /* Cached values. */ uint64_t freq; /* Actual frequency */ struct sysctl_ctx_list sysctl_ctx; }; /* * Per consumer data, information about how a consumer is using a clock node. * A pointer to this structure is used as a handle in the consumer interface. */ struct clk { device_t dev; struct clknode *clknode; int enable_cnt; }; /* * Clock domain - a group of clocks provided by one clock device. */ struct clkdom { device_t dev; /* Link to provider device */ TAILQ_ENTRY(clkdom) link; /* Global domain list entry */ clknode_list_t clknode_list; /* All clocks in the domain */ #ifdef FDT clknode_ofw_mapper_func *ofw_mapper; /* Find clock using FDT xref */ #endif }; /* * The system-wide list of clock domains. */ static clkdom_list_t clkdom_list = TAILQ_HEAD_INITIALIZER(clkdom_list); /* * Each clock node is linked on a system-wide list and can be searched by name. */ static clknode_list_t clknode_list = TAILQ_HEAD_INITIALIZER(clknode_list); /* * Locking - we use three levels of locking: * - First, topology lock is taken. This one protect all lists. * - Second level is per clknode lock. It protects clknode data. * - Third level is outside of this file, it protect clock device registers. * First two levels use sleepable locks; clock device can use mutex or sx lock. 
*/ static struct sx clk_topo_lock; SX_SYSINIT(clock_topology, &clk_topo_lock, "Clock topology lock"); #define CLK_TOPO_SLOCK() sx_slock(&clk_topo_lock) #define CLK_TOPO_XLOCK() sx_xlock(&clk_topo_lock) #define CLK_TOPO_UNLOCK() sx_unlock(&clk_topo_lock) #define CLK_TOPO_ASSERT() sx_assert(&clk_topo_lock, SA_LOCKED) #define CLK_TOPO_XASSERT() sx_assert(&clk_topo_lock, SA_XLOCKED) #define CLKNODE_SLOCK(_sc) sx_slock(&((_sc)->lock)) #define CLKNODE_XLOCK(_sc) sx_xlock(&((_sc)->lock)) #define CLKNODE_UNLOCK(_sc) sx_unlock(&((_sc)->lock)) static void clknode_adjust_parent(struct clknode *clknode, int idx); enum clknode_sysctl_type { CLKNODE_SYSCTL_PARENT, CLKNODE_SYSCTL_PARENTS_LIST, CLKNODE_SYSCTL_CHILDREN_LIST, CLKNODE_SYSCTL_FREQUENCY, CLKNODE_SYSCTL_GATE, }; static int clknode_sysctl(SYSCTL_HANDLER_ARGS); static int clkdom_sysctl(SYSCTL_HANDLER_ARGS); static void clknode_finish(void *dummy); SYSINIT(clknode_finish, SI_SUB_LAST, SI_ORDER_ANY, clknode_finish, NULL); /* * Default clock methods for base class. */ static int clknode_method_init(struct clknode *clknode, device_t dev) { return (0); } static int clknode_method_recalc_freq(struct clknode *clknode, uint64_t *freq) { return (0); } static int clknode_method_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flags, int *stop) { *stop = 0; return (0); } static int clknode_method_set_gate(struct clknode *clk, bool enable) { return (0); } static int clknode_method_set_mux(struct clknode *clk, int idx) { return (0); } /* * Internal functions. */ /* * Duplicate an array of parent names. * * Compute total size and allocate a single block which holds both the array of * pointers to strings and the copied strings themselves. Returns a pointer to * the start of the block where the array of copied string pointers lives. * * XXX Revisit this, no need for the DECONST stuff. 
*/ static const char ** strdup_list(const char **names, int num) { size_t len, slen; const char **outptr, *ptr; int i; len = sizeof(char *) * num; for (i = 0; i < num; i++) { if (names[i] == NULL) continue; slen = strlen(names[i]); if (slen == 0) panic("Clock parent names array have empty string"); len += slen + 1; } outptr = malloc(len, M_CLOCK, M_WAITOK | M_ZERO); ptr = (char *)(outptr + num); for (i = 0; i < num; i++) { if (names[i] == NULL) continue; outptr[i] = ptr; slen = strlen(names[i]) + 1; bcopy(names[i], __DECONST(void *, outptr[i]), slen); ptr += slen; } return (outptr); } /* * Recompute the cached frequency for this node and all its children. */ static int clknode_refresh_cache(struct clknode *clknode, uint64_t freq) { int rv; struct clknode *entry; CLK_TOPO_XASSERT(); /* Compute generated frequency. */ rv = CLKNODE_RECALC_FREQ(clknode, &freq); if (rv != 0) { /* XXX If an error happens while refreshing children * this leaves the world in a partially-updated state. * Panic for now. */ panic("clknode_refresh_cache failed for '%s'\n", clknode->name); return (rv); } /* Refresh cache for this node. */ clknode->freq = freq; /* Refresh cache for all children. */ TAILQ_FOREACH(entry, &(clknode->children), sibling_link) { rv = clknode_refresh_cache(entry, freq); if (rv != 0) return (rv); } return (0); } /* * Public interface. */ struct clknode * clknode_find_by_name(const char *name) { struct clknode *entry; CLK_TOPO_ASSERT(); TAILQ_FOREACH(entry, &clknode_list, clklist_link) { if (strcmp(entry->name, name) == 0) return (entry); } return (NULL); } struct clknode * clknode_find_by_id(struct clkdom *clkdom, intptr_t id) { struct clknode *entry; CLK_TOPO_ASSERT(); TAILQ_FOREACH(entry, &clkdom->clknode_list, clkdom_link) { if (entry->id == id) return (entry); } return (NULL); } /* -------------------------------------------------------------------------- */ /* * Clock domain functions */ /* Find clock domain associated to device in global list. 
*/ struct clkdom * clkdom_get_by_dev(const device_t dev) { struct clkdom *entry; CLK_TOPO_ASSERT(); TAILQ_FOREACH(entry, &clkdom_list, link) { if (entry->dev == dev) return (entry); } return (NULL); } #ifdef FDT /* Default DT mapper. */ static int clknode_default_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk) { CLK_TOPO_ASSERT(); if (ncells == 0) *clk = clknode_find_by_id(clkdom, 1); else if (ncells == 1) *clk = clknode_find_by_id(clkdom, cells[0]); else return (ERANGE); if (*clk == NULL) return (ENXIO); return (0); } #endif /* * Create a clock domain. Returns with the topo lock held. */ struct clkdom * clkdom_create(device_t dev) { struct clkdom *clkdom; clkdom = malloc(sizeof(struct clkdom), M_CLOCK, M_WAITOK | M_ZERO); clkdom->dev = dev; TAILQ_INIT(&clkdom->clknode_list); #ifdef FDT clkdom->ofw_mapper = clknode_default_ofw_map; #endif SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "clocks", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clkdom, 0, clkdom_sysctl, "A", "Clock list for the domain"); return (clkdom); } void clkdom_unlock(struct clkdom *clkdom) { CLK_TOPO_UNLOCK(); } void clkdom_xlock(struct clkdom *clkdom) { CLK_TOPO_XLOCK(); } /* * Finalize initialization of clock domain. Releases topo lock. * * XXX Revisit failure handling. */ int clkdom_finit(struct clkdom *clkdom) { struct clknode *clknode; int i, rv; #ifdef FDT phandle_t node; if ((node = ofw_bus_get_node(clkdom->dev)) == -1) { device_printf(clkdom->dev, "%s called on not ofw based device\n", __func__); return (ENXIO); } #endif rv = 0; /* Make clock domain globally visible. */ CLK_TOPO_XLOCK(); TAILQ_INSERT_TAIL(&clkdom_list, clkdom, link); #ifdef FDT OF_device_register_xref(OF_xref_from_node(node), clkdom->dev); #endif /* Register all clock names into global list. 
*/ TAILQ_FOREACH(clknode, &clkdom->clknode_list, clkdom_link) { TAILQ_INSERT_TAIL(&clknode_list, clknode, clklist_link); } /* * At this point all domain nodes must be registered and all * parents must be valid. */ TAILQ_FOREACH(clknode, &clkdom->clknode_list, clkdom_link) { if (clknode->parent_cnt == 0) continue; for (i = 0; i < clknode->parent_cnt; i++) { if (clknode->parents[i] != NULL) continue; if (clknode->parent_names[i] == NULL) continue; clknode->parents[i] = clknode_find_by_name( clknode->parent_names[i]); if (clknode->parents[i] == NULL) { device_printf(clkdom->dev, "Clock %s have unknown parent: %s\n", clknode->name, clknode->parent_names[i]); rv = ENODEV; } } /* If parent index is not set yet... */ if (clknode->parent_idx == CLKNODE_IDX_NONE) { device_printf(clkdom->dev, "Clock %s have not set parent idx\n", clknode->name); rv = ENXIO; continue; } if (clknode->parents[clknode->parent_idx] == NULL) { device_printf(clkdom->dev, "Clock %s have unknown parent(idx %d): %s\n", clknode->name, clknode->parent_idx, clknode->parent_names[clknode->parent_idx]); rv = ENXIO; continue; } clknode_adjust_parent(clknode, clknode->parent_idx); } CLK_TOPO_UNLOCK(); return (rv); } /* Dump clock domain. */ void clkdom_dump(struct clkdom * clkdom) { struct clknode *clknode; int rv; uint64_t freq; CLK_TOPO_SLOCK(); TAILQ_FOREACH(clknode, &clkdom->clknode_list, clkdom_link) { rv = clknode_get_freq(clknode, &freq); printf("Clock: %s, parent: %s(%d), freq: %ju\n", clknode->name, clknode->parent == NULL ? "(NULL)" : clknode->parent->name, clknode->parent_idx, (uintmax_t)((rv == 0) ? freq: rv)); } CLK_TOPO_UNLOCK(); } /* * Create and initialize clock object, but do not register it. 
*/ struct clknode * clknode_create(struct clkdom * clkdom, clknode_class_t clknode_class, const struct clknode_init_def *def) { struct clknode *clknode; struct sysctl_oid *clknode_oid; bool replaced; kobjop_desc_t kobj_desc; kobj_method_t *kobj_method; KASSERT(def->name != NULL, ("clock name is NULL")); KASSERT(def->name[0] != '\0', ("clock name is empty")); if (def->flags & CLK_NODE_LINKED) { KASSERT(def->parent_cnt == 0, ("Linked clock must not have parents")); KASSERT(clknode_class->size== 0, ("Linked clock cannot have own softc")); } /* Process duplicated clocks */ CLK_TOPO_SLOCK(); clknode = clknode_find_by_name(def->name); CLK_TOPO_UNLOCK(); if (clknode != NULL) { if (!(clknode->flags & CLK_NODE_LINKED) && def->flags & CLK_NODE_LINKED) { /* * New clock is linked and real already exists. * Do nothing and return real node. It is in right * domain, enqueued in right lists and fully initialized. */ return (clknode); } else if (clknode->flags & CLK_NODE_LINKED && !(def->flags & CLK_NODE_LINKED)) { /* * New clock is real but linked already exists. * Remove old linked node from originating domain * (real clock must be owned by another) and from * global names link (it will be added back into it * again in following clknode_register()). Then reuse * original clknode structure and reinitialize it * with new dat. By this, all lists containing this * node remains valid, but the new node virtually * replace the linked one. */ KASSERT(clkdom != clknode->clkdom, ("linked clock must be from another " "domain that real one")); TAILQ_REMOVE(&clkdom->clknode_list, clknode, clkdom_link); TAILQ_REMOVE(&clknode_list, clknode, clklist_link); replaced = true; } else if (clknode->flags & CLK_NODE_LINKED && def->flags & CLK_NODE_LINKED) { /* * Both clocks are linked. * Return old one, so we hold only one copy od link. */ return (clknode); } else { /* Both clocks are real */ panic("Duplicated clock registration: %s\n", def->name); } } else { /* Create clknode object and initialize it. 
*/ clknode = malloc(sizeof(struct clknode), M_CLOCK, M_WAITOK | M_ZERO); sx_init(&clknode->lock, "Clocknode lock"); TAILQ_INIT(&clknode->children); replaced = false; } kobj_init((kobj_t)clknode, (kobj_class_t)clknode_class); /* Allocate softc if required. */ if (clknode_class->size > 0) { clknode->softc = malloc(clknode_class->size, M_CLOCK, M_WAITOK | M_ZERO); } /* Prepare array for ptrs to parent clocks. */ clknode->parents = malloc(sizeof(struct clknode *) * def->parent_cnt, M_CLOCK, M_WAITOK | M_ZERO); /* Copy all strings unless they're flagged as static. */ if (def->flags & CLK_NODE_STATIC_STRINGS) { clknode->name = def->name; clknode->parent_names = def->parent_names; } else { clknode->name = strdup(def->name, M_CLOCK); clknode->parent_names = strdup_list(def->parent_names, def->parent_cnt); } /* Rest of init. */ clknode->id = def->id; clknode->clkdom = clkdom; clknode->flags = def->flags; clknode->parent_cnt = def->parent_cnt; clknode->parent = NULL; clknode->parent_idx = CLKNODE_IDX_NONE; if (replaced) return (clknode); sysctl_ctx_init(&clknode->sysctl_ctx); clknode_oid = SYSCTL_ADD_NODE(&clknode->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_clock), OID_AUTO, clknode->name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "A clock node"); SYSCTL_ADD_PROC(&clknode->sysctl_ctx, SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "frequency", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clknode, CLKNODE_SYSCTL_FREQUENCY, clknode_sysctl, "A", "The clock frequency"); /* Install gate handler only if clknode have 'set_gate' method */ kobj_desc = &clknode_set_gate_desc; kobj_method = kobj_lookup_method(((kobj_t)clknode)->ops->cls, NULL, kobj_desc); if (kobj_method != &kobj_desc->deflt && kobj_method->func != (kobjop_t)clknode_method_set_gate) { SYSCTL_ADD_PROC(&clknode->sysctl_ctx, SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "gate", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clknode, CLKNODE_SYSCTL_GATE, clknode_sysctl, "A", "The clock gate status"); } SYSCTL_ADD_PROC(&clknode->sysctl_ctx, 
SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clknode, CLKNODE_SYSCTL_PARENT, clknode_sysctl, "A", "The clock parent"); SYSCTL_ADD_PROC(&clknode->sysctl_ctx, SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "parents", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clknode, CLKNODE_SYSCTL_PARENTS_LIST, clknode_sysctl, "A", "The clock parents list"); SYSCTL_ADD_PROC(&clknode->sysctl_ctx, SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "childrens", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, clknode, CLKNODE_SYSCTL_CHILDREN_LIST, clknode_sysctl, "A", "The clock childrens list"); SYSCTL_ADD_INT(&clknode->sysctl_ctx, SYSCTL_CHILDREN(clknode_oid), OID_AUTO, "enable_cnt", CTLFLAG_RD, &clknode->enable_cnt, 0, "The clock enable counter"); return (clknode); } /* * Register clock object into clock domain hierarchy. */ struct clknode * clknode_register(struct clkdom * clkdom, struct clknode *clknode) { int rv; /* Skip already registered linked node */ if (clknode->flags & CLK_NODE_REGISTERED) return(clknode); rv = CLKNODE_INIT(clknode, clknode_get_device(clknode)); if (rv != 0) { printf(" CLKNODE_INIT failed: %d\n", rv); return (NULL); } TAILQ_INSERT_TAIL(&clkdom->clknode_list, clknode, clkdom_link); clknode->flags |= CLK_NODE_REGISTERED; return (clknode); } static void clknode_finish(void *dummy) { struct clknode *clknode; CLK_TOPO_SLOCK(); TAILQ_FOREACH(clknode, &clknode_list, clklist_link) { if (clknode->flags & CLK_NODE_LINKED) printf("Unresolved linked clock found: %s\n", clknode->name); } CLK_TOPO_UNLOCK(); } /* * Clock providers interface. */ /* * Reparent clock node. 
*/ static void clknode_adjust_parent(struct clknode *clknode, int idx) { CLK_TOPO_XASSERT(); if (clknode->parent_cnt == 0) return; if ((idx == CLKNODE_IDX_NONE) || (idx >= clknode->parent_cnt)) panic("%s: Invalid parent index %d for clock %s", __func__, idx, clknode->name); if (clknode->parents[idx] == NULL) panic("%s: Invalid parent index %d for clock %s", __func__, idx, clknode->name); /* Remove me from old children list. */ if (clknode->parent != NULL) { TAILQ_REMOVE(&clknode->parent->children, clknode, sibling_link); } /* Insert into children list of new parent. */ clknode->parent_idx = idx; clknode->parent = clknode->parents[idx]; TAILQ_INSERT_TAIL(&clknode->parent->children, clknode, sibling_link); } /* * Set parent index - init function. */ void clknode_init_parent_idx(struct clknode *clknode, int idx) { if (clknode->parent_cnt == 0) { clknode->parent_idx = CLKNODE_IDX_NONE; clknode->parent = NULL; return; } if ((idx == CLKNODE_IDX_NONE) || (idx >= clknode->parent_cnt) || (clknode->parent_names[idx] == NULL)) panic("%s: Invalid parent index %d for clock %s", __func__, idx, clknode->name); clknode->parent_idx = idx; } int clknode_set_parent_by_idx(struct clknode *clknode, int idx) { int rv; uint64_t freq; int oldidx; /* We have exclusive topology lock, node lock is not needed. */ CLK_TOPO_XASSERT(); if (clknode->parent_cnt == 0) return (0); if (clknode->parent_idx == idx) return (0); oldidx = clknode->parent_idx; clknode_adjust_parent(clknode, idx); rv = CLKNODE_SET_MUX(clknode, idx); if (rv != 0) { clknode_adjust_parent(clknode, oldidx); return (rv); } rv = clknode_get_freq(clknode->parent, &freq); if (rv != 0) return (rv); rv = clknode_refresh_cache(clknode, freq); return (rv); } int clknode_set_parent_by_name(struct clknode *clknode, const char *name) { int rv; uint64_t freq; int oldidx, idx; /* We have exclusive topology lock, node lock is not needed. 
*/ CLK_TOPO_XASSERT(); if (clknode->parent_cnt == 0) return (0); /* * If this node doesnt have mux, then passthrough request to parent. * This feature is used in clock domain initialization and allows us to * set clock source and target frequency on the tail node of the clock * chain. */ if (clknode->parent_cnt == 1) { rv = clknode_set_parent_by_name(clknode->parent, name); return (rv); } for (idx = 0; idx < clknode->parent_cnt; idx++) { if (clknode->parent_names[idx] == NULL) continue; if (strcmp(clknode->parent_names[idx], name) == 0) break; } if (idx >= clknode->parent_cnt) { return (ENXIO); } if (clknode->parent_idx == idx) return (0); oldidx = clknode->parent_idx; clknode_adjust_parent(clknode, idx); rv = CLKNODE_SET_MUX(clknode, idx); if (rv != 0) { clknode_adjust_parent(clknode, oldidx); CLKNODE_UNLOCK(clknode); return (rv); } rv = clknode_get_freq(clknode->parent, &freq); if (rv != 0) return (rv); rv = clknode_refresh_cache(clknode, freq); return (rv); } struct clknode * clknode_get_parent(struct clknode *clknode) { return (clknode->parent); } const char * clknode_get_name(struct clknode *clknode) { return (clknode->name); } const char ** clknode_get_parent_names(struct clknode *clknode) { return (clknode->parent_names); } int clknode_get_parents_num(struct clknode *clknode) { return (clknode->parent_cnt); } int clknode_get_parent_idx(struct clknode *clknode) { return (clknode->parent_idx); } int clknode_get_flags(struct clknode *clknode) { return (clknode->flags); } void * clknode_get_softc(struct clknode *clknode) { return (clknode->softc); } device_t clknode_get_device(struct clknode *clknode) { return (clknode->clkdom->dev); } #ifdef FDT void clkdom_set_ofw_mapper(struct clkdom * clkdom, clknode_ofw_mapper_func *map) { clkdom->ofw_mapper = map; } #endif /* * Real consumers executive */ int clknode_get_freq(struct clknode *clknode, uint64_t *freq) { int rv; CLK_TOPO_ASSERT(); /* Use cached value, if it exists. 
*/ *freq = clknode->freq; if (*freq != 0) return (0); /* Get frequency from parent, if the clock has a parent. */ if (clknode->parent_cnt > 0) { rv = clknode_get_freq(clknode->parent, freq); if (rv != 0) { return (rv); } } /* And recalculate my output frequency. */ CLKNODE_XLOCK(clknode); rv = CLKNODE_RECALC_FREQ(clknode, freq); if (rv != 0) { CLKNODE_UNLOCK(clknode); printf("Cannot get frequency for clk: %s, error: %d\n", clknode->name, rv); return (rv); } /* Save new frequency to cache. */ clknode->freq = *freq; CLKNODE_UNLOCK(clknode); return (0); } static int _clknode_set_freq(struct clknode *clknode, uint64_t *freq, int flags, int enablecnt) { int rv, done; uint64_t parent_freq; /* We have exclusive topology lock, node lock is not needed. */ CLK_TOPO_XASSERT(); /* Check for no change */ if (clknode->freq == *freq) return (0); parent_freq = 0; /* * We can set frequency only if * clock is disabled * OR * clock is glitch free and is enabled by calling consumer only */ if ((flags & CLK_SET_DRYRUN) == 0 && clknode->enable_cnt > 1 && clknode->enable_cnt > enablecnt && (clknode->flags & CLK_NODE_GLITCH_FREE) == 0) { return (EBUSY); } /* Get frequency from parent, if the clock has a parent. */ if (clknode->parent_cnt > 0) { rv = clknode_get_freq(clknode->parent, &parent_freq); if (rv != 0) { return (rv); } } /* Set frequency for this clock. */ rv = CLKNODE_SET_FREQ(clknode, parent_freq, freq, flags, &done); if (rv != 0) { printf("Cannot set frequency for clk: %s, error: %d\n", clknode->name, rv); if ((flags & CLK_SET_DRYRUN) == 0) clknode_refresh_cache(clknode, parent_freq); return (rv); } if (done) { /* Success - invalidate frequency cache for all children. 
*/ if ((flags & CLK_SET_DRYRUN) == 0) { clknode->freq = *freq; /* Clock might have reparent during set_freq */ if (clknode->parent_cnt > 0) { rv = clknode_get_freq(clknode->parent, &parent_freq); if (rv != 0) { return (rv); } } clknode_refresh_cache(clknode, parent_freq); } } else if (clknode->parent != NULL) { /* Nothing changed, pass request to parent. */ rv = _clknode_set_freq(clknode->parent, freq, flags, enablecnt); } else { /* End of chain without action. */ printf("Cannot set frequency for clk: %s, end of chain\n", clknode->name); rv = ENXIO; } return (rv); } int clknode_set_freq(struct clknode *clknode, uint64_t freq, int flags, int enablecnt) { return (_clknode_set_freq(clknode, &freq, flags, enablecnt)); } int clknode_test_freq(struct clknode *clknode, uint64_t freq, int flags, int enablecnt, uint64_t *out_freq) { int rv; rv = _clknode_set_freq(clknode, &freq, flags | CLK_SET_DRYRUN, enablecnt); if (out_freq != NULL) *out_freq = freq; return (rv); } int clknode_enable(struct clknode *clknode) { int rv; CLK_TOPO_ASSERT(); /* Enable clock for each node in chain, starting from source. */ if (clknode->parent_cnt > 0) { rv = clknode_enable(clknode->parent); if (rv != 0) { return (rv); } } /* Handle this node */ CLKNODE_XLOCK(clknode); if (clknode->enable_cnt == 0) { rv = CLKNODE_SET_GATE(clknode, 1); if (rv != 0) { CLKNODE_UNLOCK(clknode); return (rv); } } clknode->enable_cnt++; CLKNODE_UNLOCK(clknode); return (0); } int clknode_disable(struct clknode *clknode) { int rv; CLK_TOPO_ASSERT(); rv = 0; CLKNODE_XLOCK(clknode); /* Disable clock for each node in chain, starting from consumer. 
*/ if ((clknode->enable_cnt == 1) && ((clknode->flags & CLK_NODE_CANNOT_STOP) == 0)) { rv = CLKNODE_SET_GATE(clknode, 0); if (rv != 0) { CLKNODE_UNLOCK(clknode); return (rv); } } clknode->enable_cnt--; CLKNODE_UNLOCK(clknode); if (clknode->parent_cnt > 0) { rv = clknode_disable(clknode->parent); } return (rv); } int clknode_stop(struct clknode *clknode, int depth) { int rv; CLK_TOPO_ASSERT(); rv = 0; CLKNODE_XLOCK(clknode); /* The first node cannot be enabled. */ if ((clknode->enable_cnt != 0) && (depth == 0)) { CLKNODE_UNLOCK(clknode); return (EBUSY); } /* Stop clock for each node in chain, starting from consumer. */ if ((clknode->enable_cnt == 0) && ((clknode->flags & CLK_NODE_CANNOT_STOP) == 0)) { rv = CLKNODE_SET_GATE(clknode, 0); if (rv != 0) { CLKNODE_UNLOCK(clknode); return (rv); } } CLKNODE_UNLOCK(clknode); if (clknode->parent_cnt > 0) rv = clknode_stop(clknode->parent, depth + 1); return (rv); } /* -------------------------------------------------------------------------- * * Clock consumers interface. 
* */ /* Helper function for clk_get*() */ static clk_t clk_create(struct clknode *clknode, device_t dev) { struct clk *clk; CLK_TOPO_ASSERT(); clk = malloc(sizeof(struct clk), M_CLOCK, M_WAITOK); clk->dev = dev; clk->clknode = clknode; clk->enable_cnt = 0; clknode->ref_cnt++; return (clk); } int clk_get_freq(clk_t clk, uint64_t *freq) { int rv; struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); rv = clknode_get_freq(clknode, freq); CLK_TOPO_UNLOCK(); return (rv); } int clk_set_freq(clk_t clk, uint64_t freq, int flags) { int rv; struct clknode *clknode; flags &= CLK_SET_USER_MASK; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_XLOCK(); rv = clknode_set_freq(clknode, freq, flags, clk->enable_cnt); CLK_TOPO_UNLOCK(); return (rv); } int clk_test_freq(clk_t clk, uint64_t freq, int flags) { int rv; struct clknode *clknode; flags &= CLK_SET_USER_MASK; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_XLOCK(); rv = clknode_set_freq(clknode, freq, flags | CLK_SET_DRYRUN, 0); CLK_TOPO_UNLOCK(); return (rv); } int clk_get_parent(clk_t clk, clk_t *parent) { struct clknode *clknode; struct clknode *parentnode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); parentnode = clknode_get_parent(clknode); if (parentnode == NULL) { CLK_TOPO_UNLOCK(); return (ENODEV); } *parent = clk_create(parentnode, clk->dev); CLK_TOPO_UNLOCK(); return (0); } int clk_set_parent_by_clk(clk_t clk, clk_t parent) { int rv; struct clknode *clknode; struct clknode *parentnode; clknode = clk->clknode; parentnode = parent->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); KASSERT(parentnode->ref_cnt > 0, 
("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_XLOCK(); rv = clknode_set_parent_by_name(clknode, parentnode->name); CLK_TOPO_UNLOCK(); return (rv); } int clk_enable(clk_t clk) { int rv; struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); rv = clknode_enable(clknode); if (rv == 0) clk->enable_cnt++; CLK_TOPO_UNLOCK(); return (rv); } int clk_disable(clk_t clk) { int rv; struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); KASSERT(clk->enable_cnt > 0, ("Attempt to disable already disabled clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); rv = clknode_disable(clknode); if (rv == 0) clk->enable_cnt--; CLK_TOPO_UNLOCK(); return (rv); } int clk_stop(clk_t clk) { int rv; struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); KASSERT(clk->enable_cnt == 0, ("Attempt to stop already enabled clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); rv = clknode_stop(clknode, 0); CLK_TOPO_UNLOCK(); return (rv); } int clk_release(clk_t clk) { struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); CLK_TOPO_SLOCK(); while (clk->enable_cnt > 0) { clknode_disable(clknode); clk->enable_cnt--; } CLKNODE_XLOCK(clknode); clknode->ref_cnt--; CLKNODE_UNLOCK(clknode); CLK_TOPO_UNLOCK(); free(clk, M_CLOCK); return (0); } const char * clk_get_name(clk_t clk) { const char *name; struct clknode *clknode; clknode = clk->clknode; KASSERT(clknode->ref_cnt > 0, ("Attempt to access unreferenced clock: %s\n", clknode->name)); name = clknode_get_name(clknode); return (name); } int clk_get_by_name(device_t dev, const char *name, clk_t *clk) { struct clknode *clknode; CLK_TOPO_SLOCK(); clknode = 
clknode_find_by_name(name); if (clknode == NULL) { CLK_TOPO_UNLOCK(); return (ENODEV); } *clk = clk_create(clknode, dev); CLK_TOPO_UNLOCK(); return (0); } int clk_get_by_id(device_t dev, struct clkdom *clkdom, intptr_t id, clk_t *clk) { struct clknode *clknode; CLK_TOPO_SLOCK(); clknode = clknode_find_by_id(clkdom, id); if (clknode == NULL) { CLK_TOPO_UNLOCK(); return (ENODEV); } *clk = clk_create(clknode, dev); CLK_TOPO_UNLOCK(); return (0); } #ifdef FDT static void clk_set_assigned_parent(device_t dev, clk_t clk, int idx) { clk_t parent; const char *pname; int rv; rv = clk_get_by_ofw_index_prop(dev, 0, "assigned-clock-parents", idx, &parent); if (rv != 0) { device_printf(dev, "cannot get parent at idx %d\n", idx); return; } pname = clk_get_name(parent); rv = clk_set_parent_by_clk(clk, parent); if (rv != 0) device_printf(dev, "Cannot set parent %s for clock %s\n", pname, clk_get_name(clk)); else if (bootverbose) device_printf(dev, "Set %s as the parent of %s\n", pname, clk_get_name(clk)); clk_release(parent); } static void clk_set_assigned_rates(device_t dev, clk_t clk, uint32_t freq) { int rv; rv = clk_set_freq(clk, freq, CLK_SET_ROUND_DOWN | CLK_SET_ROUND_UP); if (rv != 0) { device_printf(dev, "Failed to set %s to a frequency of %u\n", clk_get_name(clk), freq); return; } if (bootverbose) device_printf(dev, "Set %s to %u\n", clk_get_name(clk), freq); } int clk_set_assigned(device_t dev, phandle_t node) { clk_t clk; uint32_t *rates; int rv, nclocks, nrates, nparents, i; rv = ofw_bus_parse_xref_list_get_length(node, "assigned-clocks", "#clock-cells", &nclocks); if (rv != 0) { if (rv != ENOENT) device_printf(dev, "cannot parse assigned-clock property\n"); return (rv); } nrates = OF_getencprop_alloc_multi(node, "assigned-clock-rates", sizeof(*rates), (void **)&rates); if (nrates <= 0) nrates = 0; if (ofw_bus_parse_xref_list_get_length(node, "assigned-clock-parents", "#clock-cells", &nparents) != 0) nparents = -1; for (i = 0; i < nclocks; i++) { /* First get the clock 
we are supposed to modify */ rv = clk_get_by_ofw_index_prop(dev, 0, "assigned-clocks", i, &clk); if (rv != 0) { if (bootverbose) device_printf(dev, "cannot get assigned clock at idx %d\n", i); continue; } /* First set it's parent if needed */ if (i < nparents) clk_set_assigned_parent(dev, clk, i); /* Then set a new frequency */ if (i < nrates && rates[i] != 0) clk_set_assigned_rates(dev, clk, rates[i]); clk_release(clk); } if (rates != NULL) OF_prop_free(rates); return (0); } int clk_get_by_ofw_index_prop(device_t dev, phandle_t cnode, const char *prop, int idx, clk_t *clk) { phandle_t parent, *cells; device_t clockdev; int ncells, rv; struct clkdom *clkdom; struct clknode *clknode; *clk = NULL; if (cnode <= 0) cnode = ofw_bus_get_node(dev); if (cnode <= 0) { device_printf(dev, "%s called on not ofw based device\n", __func__); return (ENXIO); } rv = ofw_bus_parse_xref_list_alloc(cnode, prop, "#clock-cells", idx, &parent, &ncells, &cells); if (rv != 0) { return (rv); } clockdev = OF_device_from_xref(parent); if (clockdev == NULL) { rv = ENODEV; goto done; } CLK_TOPO_SLOCK(); clkdom = clkdom_get_by_dev(clockdev); if (clkdom == NULL){ CLK_TOPO_UNLOCK(); rv = ENXIO; goto done; } rv = clkdom->ofw_mapper(clkdom, ncells, cells, &clknode); if (rv == 0) { *clk = clk_create(clknode, dev); } CLK_TOPO_UNLOCK(); done: if (cells != NULL) OF_prop_free(cells); return (rv); } int clk_get_by_ofw_index(device_t dev, phandle_t cnode, int idx, clk_t *clk) { return (clk_get_by_ofw_index_prop(dev, cnode, "clocks", idx, clk)); } int clk_get_by_ofw_name(device_t dev, phandle_t cnode, const char *name, clk_t *clk) { int rv, idx; if (cnode <= 0) cnode = ofw_bus_get_node(dev); if (cnode <= 0) { device_printf(dev, "%s called on not ofw based device\n", __func__); return (ENXIO); } rv = ofw_bus_find_string_index(cnode, "clock-names", name, &idx); if (rv != 0) return (rv); return (clk_get_by_ofw_index(dev, cnode, idx, clk)); } /* 
-------------------------------------------------------------------------- * * Support functions for parsing various clock related OFW things. */ /* * Get "clock-output-names" and (optional) "clock-indices" lists. * Both lists are allocated using M_OFWPROP specifier. * * Returns number of items or 0. */ int clk_parse_ofw_out_names(device_t dev, phandle_t node, const char ***out_names, uint32_t **indices) { int name_items, rv; *out_names = NULL; *indices = NULL; if (!OF_hasprop(node, "clock-output-names")) return (0); rv = ofw_bus_string_list_to_array(node, "clock-output-names", out_names); if (rv <= 0) return (0); name_items = rv; if (!OF_hasprop(node, "clock-indices")) return (name_items); rv = OF_getencprop_alloc_multi(node, "clock-indices", sizeof (uint32_t), (void **)indices); if (rv != name_items) { device_printf(dev, " Size of 'clock-output-names' and " "'clock-indices' differs\n"); OF_prop_free(*out_names); OF_prop_free(*indices); return (0); } return (name_items); } /* * Get output clock name for single output clock node. 
*/ int clk_parse_ofw_clk_name(device_t dev, phandle_t node, const char **name) { const char **out_names; const char *tmp_name; int rv; *name = NULL; if (!OF_hasprop(node, "clock-output-names")) { tmp_name = ofw_bus_get_name(dev); if (tmp_name == NULL) return (ENXIO); *name = strdup(tmp_name, M_OFWPROP); return (0); } rv = ofw_bus_string_list_to_array(node, "clock-output-names", &out_names); if (rv != 1) { OF_prop_free(out_names); device_printf(dev, "Malformed 'clock-output-names' property\n"); return (ENXIO); } *name = strdup(out_names[0], M_OFWPROP); OF_prop_free(out_names); return (0); } #endif static int clkdom_sysctl(SYSCTL_HANDLER_ARGS) { struct clkdom *clkdom = arg1; struct clknode *clknode; struct sbuf *sb; int ret; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); CLK_TOPO_SLOCK(); TAILQ_FOREACH(clknode, &clkdom->clknode_list, clkdom_link) { sbuf_printf(sb, "%s ", clknode->name); } CLK_TOPO_UNLOCK(); ret = sbuf_finish(sb); sbuf_delete(sb); return (ret); } static int clknode_sysctl(SYSCTL_HANDLER_ARGS) { struct clknode *clknode, *children; enum clknode_sysctl_type type = arg2; struct sbuf *sb; const char **parent_names; uint64_t freq; bool enable; int ret, i; clknode = arg1; sb = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (sb == NULL) return (ENOMEM); CLK_TOPO_SLOCK(); switch (type) { case CLKNODE_SYSCTL_PARENT: if (clknode->parent) sbuf_printf(sb, "%s", clknode->parent->name); break; case CLKNODE_SYSCTL_PARENTS_LIST: parent_names = clknode_get_parent_names(clknode); for (i = 0; i < clknode->parent_cnt; i++) sbuf_printf(sb, "%s ", parent_names[i]); break; case CLKNODE_SYSCTL_CHILDREN_LIST: TAILQ_FOREACH(children, &(clknode->children), sibling_link) { sbuf_printf(sb, "%s ", children->name); } break; case CLKNODE_SYSCTL_FREQUENCY: ret = clknode_get_freq(clknode, &freq); if (ret == 0) sbuf_printf(sb, "%ju ", (uintmax_t)freq); else sbuf_printf(sb, "Error: %d ", ret); break; case CLKNODE_SYSCTL_GATE: ret = 
CLKNODE_GET_GATE(clknode, &enable); if (ret == 0) sbuf_printf(sb, enable ? "enabled": "disabled"); else if (ret == ENXIO) sbuf_printf(sb, "unimplemented"); else if (ret == ENOENT) sbuf_printf(sb, "unreadable"); else sbuf_printf(sb, "Error: %d ", ret); break; } CLK_TOPO_UNLOCK(); ret = sbuf_finish(sb); sbuf_delete(sb); return (ret); } diff --git a/sys/dev/extres/clk/clk.h b/sys/dev/clk/clk.h similarity index 98% rename from sys/dev/extres/clk/clk.h rename to sys/dev/clk/clk.h index caface7bc765..4702e8741a67 100644 --- a/sys/dev/extres/clk/clk.h +++ b/sys/dev/clk/clk.h @@ -1,155 +1,156 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ -#ifndef _DEV_EXTRES_CLK_H_ -#define _DEV_EXTRES_CLK_H_ +#ifndef _DEV_CLK_H_ +#define _DEV_CLK_H_ + #include "opt_platform.h" #include #ifdef FDT #include #endif #include "clknode_if.h" #define CLKNODE_IDX_NONE -1 /* Not-selected index */ /* clknode flags. */ #define CLK_NODE_STATIC_STRINGS 0x00000001 /* Static name strings */ #define CLK_NODE_GLITCH_FREE 0x00000002 /* Freq can change w/o stop */ #define CLK_NODE_CANNOT_STOP 0x00000004 /* Cannot be disabled */ #define CLK_NODE_LINKED 0x00000008 /* Is linked clock */ #define CLK_NODE_REGISTERED 0x00000020 /* Is already registered */ /* Flags passed to clk_set_freq() and clknode_set_freq(). */ #define CLK_SET_ROUND(x) ((x) & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) #define CLK_SET_ROUND_EXACT 0 #define CLK_SET_ROUND_UP 0x00000001 #define CLK_SET_ROUND_DOWN 0x00000002 #define CLK_SET_ROUND_MULTIPLE 0x00000004 #define CLK_SET_ROUND_ANY (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN) #define CLK_SET_USER_MASK 0x0000FFFF #define CLK_SET_DRYRUN 0x00010000 typedef struct clk *clk_t; /* Initialization parameters for clocknode creation. */ struct clknode_init_def { const char *name; intptr_t id; const char **parent_names; int parent_cnt; int flags; }; /* * Shorthands for constructing method tables. */ #define CLKNODEMETHOD KOBJMETHOD #define CLKNODEMETHOD_END KOBJMETHOD_END #define clknode_method_t kobj_method_t #define clknode_class_t kobj_class_t DECLARE_CLASS(clknode_class); /* * Clock domain functions. */ struct clkdom *clkdom_create(device_t dev); int clkdom_finit(struct clkdom *clkdom); void clkdom_dump(struct clkdom * clkdom); void clkdom_unlock(struct clkdom *clkdom); void clkdom_xlock(struct clkdom *clkdom); /* * Clock providers interface. 
*/ struct clkdom *clkdom_get_by_dev(const device_t dev); struct clknode *clknode_create(struct clkdom *clkdom, clknode_class_t clknode_class, const struct clknode_init_def *def); struct clknode *clknode_register(struct clkdom *cldom, struct clknode *clk); #ifdef FDT typedef int clknode_ofw_mapper_func(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk); void clkdom_set_ofw_mapper(struct clkdom *clkdom, clknode_ofw_mapper_func *cmp); #endif void clknode_init_parent_idx(struct clknode *clknode, int idx); int clknode_set_parent_by_idx(struct clknode *clk, int idx); int clknode_set_parent_by_name(struct clknode *clk, const char *name); const char *clknode_get_name(struct clknode *clk); const char **clknode_get_parent_names(struct clknode *clk); int clknode_get_parents_num(struct clknode *clk); int clknode_get_parent_idx(struct clknode *clk); struct clknode *clknode_get_parent(struct clknode *clk); int clknode_get_flags(struct clknode *clk); void *clknode_get_softc(struct clknode *clk); device_t clknode_get_device(struct clknode *clk); struct clknode *clknode_find_by_name(const char *name); struct clknode *clknode_find_by_id(struct clkdom *clkdom, intptr_t id); int clknode_get_freq(struct clknode *clknode, uint64_t *freq); int clknode_set_freq(struct clknode *clknode, uint64_t freq, int flags, int enablecnt); int clknode_test_freq(struct clknode *clknode, uint64_t freq, int flags, int enablecnt, uint64_t *out_freq); int clknode_enable(struct clknode *clknode); int clknode_disable(struct clknode *clknode); int clknode_stop(struct clknode *clknode, int depth); /* * Clock consumers interface. 
*/ int clk_get_by_name(device_t dev, const char *name, clk_t *clk); int clk_get_by_id(device_t dev, struct clkdom *clkdom, intptr_t id, clk_t *clk); int clk_release(clk_t clk); int clk_get_freq(clk_t clk, uint64_t *freq); int clk_set_freq(clk_t clk, uint64_t freq, int flags); int clk_test_freq(clk_t clk, uint64_t freq, int flags); int clk_enable(clk_t clk); int clk_disable(clk_t clk); int clk_stop(clk_t clk); int clk_get_parent(clk_t clk, clk_t *parent); int clk_set_parent_by_clk(clk_t clk, clk_t parent); const char *clk_get_name(clk_t clk); static inline uint64_t clk_freq_diff(uint64_t x, uint64_t y) { return (x >= y ? x - y : y - x); } #ifdef FDT int clk_set_assigned(device_t dev, phandle_t node); int clk_get_by_ofw_index(device_t dev, phandle_t node, int idx, clk_t *clk); int clk_get_by_ofw_index_prop(device_t dev, phandle_t cnode, const char *prop, int idx, clk_t *clk); int clk_get_by_ofw_name(device_t dev, phandle_t node, const char *name, clk_t *clk); int clk_parse_ofw_out_names(device_t dev, phandle_t node, const char ***out_names, uint32_t **indices); int clk_parse_ofw_clk_name(device_t dev, phandle_t node, const char **name); #endif -#endif /* _DEV_EXTRES_CLK_H_ */ +#endif /* _DEV_CLK_H_ */ diff --git a/sys/dev/extres/clk/clk_bus.c b/sys/dev/clk/clk_bus.c similarity index 100% rename from sys/dev/extres/clk/clk_bus.c rename to sys/dev/clk/clk_bus.c diff --git a/sys/dev/extres/clk/clk_div.c b/sys/dev/clk/clk_div.c similarity index 99% rename from sys/dev/extres/clk/clk_div.c rename to sys/dev/clk/clk_div.c index ab65e2bae2d2..b43136b7b4f0 100644 --- a/sys/dev/extres/clk/clk_div.c +++ b/sys/dev/clk/clk_div.c @@ -1,262 +1,262 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include -#include +#include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int clknode_div_init(struct clknode *clk, device_t dev); static int clknode_div_recalc(struct clknode *clk, uint64_t *req); static int clknode_div_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flag, int *stop); struct clknode_div_sc { struct mtx *mtx; struct resource *mem_res; uint32_t offset; uint32_t i_shift; uint32_t i_mask; uint32_t i_width; uint32_t f_shift; uint32_t f_mask; uint32_t f_width; int div_flags; uint32_t divider; /* in natural form */ struct clk_div_table *div_table; }; static clknode_method_t clknode_div_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, clknode_div_init), CLKNODEMETHOD(clknode_recalc_freq, clknode_div_recalc), CLKNODEMETHOD(clknode_set_freq, clknode_div_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(clknode_div, clknode_div_class, clknode_div_methods, sizeof(struct clknode_div_sc), clknode_class); static uint32_t clknode_div_table_get_divider(struct clknode_div_sc *sc, uint32_t divider) { struct clk_div_table *table; if (!(sc->div_flags & CLK_DIV_WITH_TABLE)) return (divider); for (table = sc->div_table; table->divider != 0; table++) if (table->value == sc->divider) return (table->divider); return (0); } static int clknode_div_table_get_value(struct clknode_div_sc *sc, uint32_t *divider) { struct clk_div_table *table; if (!(sc->div_flags & CLK_DIV_WITH_TABLE)) return (0); for (table = sc->div_table; table->divider != 0; table++) if (table->divider == *divider) { *divider = table->value; return 
(0); } return (ENOENT); } static int clknode_div_init(struct clknode *clk, device_t dev) { uint32_t reg; struct clknode_div_sc *sc; uint32_t i_div, f_div; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); if (rv != 0) return (rv); i_div = (reg >> sc->i_shift) & sc->i_mask; if (!(sc->div_flags & CLK_DIV_WITH_TABLE) && !(sc->div_flags & CLK_DIV_ZERO_BASED)) i_div++; f_div = (reg >> sc->f_shift) & sc->f_mask; sc->divider = i_div << sc->f_width | f_div; sc->divider = clknode_div_table_get_divider(sc, sc->divider); if (sc->divider == 0) panic("%s: divider is zero!\n", clknode_get_name(clk)); clknode_init_parent_idx(clk, 0); return(0); } static int clknode_div_recalc(struct clknode *clk, uint64_t *freq) { struct clknode_div_sc *sc; sc = clknode_get_softc(clk); if (sc->divider == 0) { printf("%s: %s divider is zero!\n", clknode_get_name(clk), __func__); *freq = 0; return(EINVAL); } *freq = (*freq << sc->f_width) / sc->divider; return (0); } static int clknode_div_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct clknode_div_sc *sc; uint64_t divider, _fin, _fout; uint32_t reg, i_div, f_div, hw_i_div; int rv; sc = clknode_get_softc(clk); /* For fractional divider. */ _fin = fin << sc->f_width; divider = (_fin + *fout / 2) / *fout; _fout = _fin / divider; /* Rounding. */ if ((flags & CLK_SET_ROUND_UP) && (*fout < _fout)) divider--; else if ((flags & CLK_SET_ROUND_DOWN) && (*fout > _fout)) divider++; /* Break divider into integer and fractional parts. 
*/ i_div = divider >> sc->f_width; f_div = divider & sc->f_mask; if (i_div == 0) { printf("%s: %s integer divider is zero!\n", clknode_get_name(clk), __func__); return(EINVAL); } *stop = 1; hw_i_div = i_div; if (sc->div_flags & CLK_DIV_WITH_TABLE) { if (clknode_div_table_get_value(sc, &hw_i_div) != 0) return (ERANGE); } else { if (!(sc->div_flags & CLK_DIV_ZERO_BASED)) hw_i_div--; if (i_div > sc->i_mask) { /* XXX Pass to parent or return error? */ printf("%s: %s integer divider is too big: %u\n", clknode_get_name(clk), __func__, i_div); hw_i_div = sc->i_mask; *stop = 0; } i_div = hw_i_div; if (!(sc->div_flags & CLK_DIV_ZERO_BASED)) i_div++; } divider = i_div << sc->f_width | f_div; if ((flags & CLK_SET_DRYRUN) == 0) { if ((*stop != 0) && ((flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0) && (*fout != (_fin / divider))) return (ERANGE); DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, (sc->i_mask << sc->i_shift) | (sc->f_mask << sc->f_shift), (hw_i_div << sc->i_shift) | (f_div << sc->f_shift)); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); sc->divider = divider; } *fout = _fin / divider; return (0); } int clknode_div_register(struct clkdom *clkdom, struct clk_div_def *clkdef) { struct clknode *clk; struct clknode_div_sc *sc; clk = clknode_create(clkdom, &clknode_div_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->i_shift = clkdef->i_shift; sc->i_width = clkdef->i_width; sc->i_mask = (1 << clkdef->i_width) - 1; sc->f_shift = clkdef->f_shift; sc->f_width = clkdef->f_width; sc->f_mask = (1 << clkdef->f_width) - 1; sc->div_flags = clkdef->div_flags; sc->div_table = clkdef->div_table; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/extres/clk/clk_div.h b/sys/dev/clk/clk_div.h similarity index 94% rename from sys/dev/extres/clk/clk_div.h rename to sys/dev/clk/clk_div.h index 7db3f5885b1f..328fc40fec8b 100644 --- 
a/sys/dev/extres/clk/clk_div.h +++ b/sys/dev/clk/clk_div.h @@ -1,53 +1,53 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#ifndef _DEV_EXTRES_CLK_DIV_H_ -#define _DEV_EXTRES_CLK_DIV_H_ +#ifndef _DEV_CLK_DIV_H_ +#define _DEV_CLK_DIV_H_ -#include +#include #define CLK_DIV_ZERO_BASED 0x0001 /* Zero based divider. 
*/ #define CLK_DIV_WITH_TABLE 0x0002 /* Table to lookup the real value */ struct clk_div_table { uint32_t value; uint32_t divider; }; struct clk_div_def { struct clknode_init_def clkdef; uint32_t offset; /* Divider register offset */ uint32_t i_shift; /* Pos of div bits in reg */ uint32_t i_width; /* Width of div bit field */ uint32_t f_shift; /* Fractional divide bits, */ uint32_t f_width; /* set to 0 for int divider */ int div_flags; /* Divider-specific flags */ struct clk_div_table *div_table; /* Divider table */ }; int clknode_div_register(struct clkdom *clkdom, struct clk_div_def *clkdef); -#endif /*_DEV_EXTRES_CLK_DIV_H_*/ +#endif /*_DEV_CLK_DIV_H_*/ diff --git a/sys/dev/extres/clk/clk_fixed.c b/sys/dev/clk/clk_fixed.c similarity index 99% rename from sys/dev/extres/clk/clk_fixed.c rename to sys/dev/clk/clk_fixed.c index e5ca6b31d000..647ca6a05568 100644 --- a/sys/dev/extres/clk/clk_fixed.c +++ b/sys/dev/clk/clk_fixed.c @@ -1,288 +1,288 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif -#include +#include #define CLK_TYPE_FIXED 1 #define CLK_TYPE_FIXED_FACTOR 2 static int clknode_fixed_init(struct clknode *clk, device_t dev); static int clknode_fixed_recalc(struct clknode *clk, uint64_t *freq); static int clknode_fixed_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop); struct clknode_fixed_sc { int fixed_flags; uint64_t freq; uint32_t mult; uint32_t div; }; static clknode_method_t clknode_fixed_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, clknode_fixed_init), CLKNODEMETHOD(clknode_recalc_freq, clknode_fixed_recalc), CLKNODEMETHOD(clknode_set_freq, clknode_fixed_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(clknode_fixed, clknode_fixed_class, clknode_fixed_methods, sizeof(struct clknode_fixed_sc), clknode_class); static int clknode_fixed_init(struct clknode *clk, device_t dev) { struct clknode_fixed_sc *sc; sc = clknode_get_softc(clk); if (sc->freq == 0) clknode_init_parent_idx(clk, 0); return(0); } static int clknode_fixed_recalc(struct clknode *clk, uint64_t *freq) { struct clknode_fixed_sc *sc; sc = clknode_get_softc(clk); if ((sc->mult != 0) && (sc->div != 0)) *freq = (*freq / sc->div) * sc->mult; else *freq = sc->freq; return (0); } static int clknode_fixed_set_freq(struct clknode *clk, uint64_t fin, uint64_t 
*fout, int flags, int *stop) { struct clknode_fixed_sc *sc; sc = clknode_get_softc(clk); if (sc->mult == 0 || sc->div == 0) { /* Fixed frequency clock. */ *stop = 1; if (*fout != sc->freq) return (ERANGE); return (0); } /* Fixed factor clock. */ *stop = 0; *fout = (*fout / sc->mult) * sc->div; return (0); } int clknode_fixed_register(struct clkdom *clkdom, struct clk_fixed_def *clkdef) { struct clknode *clk; struct clknode_fixed_sc *sc; clk = clknode_create(clkdom, &clknode_fixed_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->fixed_flags = clkdef->fixed_flags; sc->freq = clkdef->freq; sc->mult = clkdef->mult; sc->div = clkdef->div; clknode_register(clkdom, clk); return (0); } #ifdef FDT static struct ofw_compat_data compat_data[] = { {"fixed-clock", CLK_TYPE_FIXED}, {"fixed-factor-clock", CLK_TYPE_FIXED_FACTOR}, {NULL, 0}, }; struct clk_fixed_softc { device_t dev; struct clkdom *clkdom; }; static int clk_fixed_probe(device_t dev) { intptr_t clk_type; clk_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; switch (clk_type) { case CLK_TYPE_FIXED: if (OF_hasprop(ofw_bus_get_node(dev), "clock-frequency") == 0) { device_printf(dev, "clock-fixed has no clock-frequency\n"); return (ENXIO); } device_set_desc(dev, "Fixed clock"); return (BUS_PROBE_DEFAULT); case CLK_TYPE_FIXED_FACTOR: device_set_desc(dev, "Fixed factor clock"); return (BUS_PROBE_DEFAULT); default: return (ENXIO); } } static int clk_fixed_init_fixed(struct clk_fixed_softc *sc, phandle_t node, struct clk_fixed_def *def) { uint32_t freq; int rv; def->clkdef.id = 1; rv = OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)); if (rv <= 0) return (ENXIO); def->freq = freq; return (0); } static int clk_fixed_init_fixed_factor(struct clk_fixed_softc *sc, phandle_t node, struct clk_fixed_def *def) { int rv; clk_t parent; def->clkdef.id = 1; rv = OF_getencprop(node, "clock-mult", &def->mult, sizeof(def->mult)); if (rv <= 0) return (ENXIO); rv = 
OF_getencprop(node, "clock-div", &def->div, sizeof(def->div)); if (rv <= 0) return (ENXIO); /* Get name of parent clock */ rv = clk_get_by_ofw_index(sc->dev, 0, 0, &parent); if (rv != 0) return (ENXIO); def->clkdef.parent_names = malloc(sizeof(char *), M_OFWPROP, M_WAITOK); def->clkdef.parent_names[0] = clk_get_name(parent); def->clkdef.parent_cnt = 1; clk_release(parent); return (0); } static int clk_fixed_attach(device_t dev) { struct clk_fixed_softc *sc; intptr_t clk_type; phandle_t node; struct clk_fixed_def def; int rv; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); clk_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; bzero(&def, sizeof(def)); if (clk_type == CLK_TYPE_FIXED) rv = clk_fixed_init_fixed(sc, node, &def); else if (clk_type == CLK_TYPE_FIXED_FACTOR) rv = clk_fixed_init_fixed_factor(sc, node, &def); else rv = ENXIO; if (rv != 0) { device_printf(sc->dev, "Cannot FDT parameters.\n"); goto fail; } rv = clk_parse_ofw_clk_name(dev, node, &def.clkdef.name); if (rv != 0) { device_printf(sc->dev, "Cannot parse clock name.\n"); goto fail; } sc->clkdom = clkdom_create(dev); KASSERT(sc->clkdom != NULL, ("Clock domain is NULL")); rv = clknode_fixed_register(sc->clkdom, &def); if (rv != 0) { device_printf(sc->dev, "Cannot register fixed clock.\n"); rv = ENXIO; goto fail; } rv = clkdom_finit(sc->clkdom); if (rv != 0) { device_printf(sc->dev, "Clk domain finit fails.\n"); rv = ENXIO; goto fail; } #ifdef CLK_DEBUG clkdom_dump(sc->clkdom); #endif OF_prop_free(__DECONST(char *, def.clkdef.name)); OF_prop_free(def.clkdef.parent_names); return (bus_generic_attach(dev)); fail: OF_prop_free(__DECONST(char *, def.clkdef.name)); OF_prop_free(def.clkdef.parent_names); return (rv); } static device_method_t clk_fixed_methods[] = { /* Device interface */ DEVMETHOD(device_probe, clk_fixed_probe), DEVMETHOD(device_attach, clk_fixed_attach), DEVMETHOD_END }; DEFINE_CLASS_0(clk_fixed, clk_fixed_driver, clk_fixed_methods, sizeof(struct 
clk_fixed_softc)); EARLY_DRIVER_MODULE(clk_fixed, simplebus, clk_fixed_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(clk_fixed, 1); #endif diff --git a/sys/dev/extres/clk/clk_fixed.h b/sys/dev/clk/clk_fixed.h similarity index 93% rename from sys/dev/extres/clk/clk_fixed.h rename to sys/dev/clk/clk_fixed.h index 103ca69126ba..56f0fcbbb449 100644 --- a/sys/dev/extres/clk/clk_fixed.h +++ b/sys/dev/clk/clk_fixed.h @@ -1,50 +1,50 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ -#ifndef _DEV_EXTRES_CLK_FIXED_H_ -#define _DEV_EXTRES_CLK_FIXED_H_ +#ifndef _DEV_CLK_FIXED_H_ +#define _DEV_CLK_FIXED_H_ -#include +#include /* * A fixed clock can represent several different real-world objects, including * an oscillator with a fixed output frequency, a fixed divider (multiplier and * divisor must both be > 0), or a phase-fractional divider within a PLL * (however the code currently divides first, then multiplies, potentially * leading to different roundoff errors than the hardware PLL). */ struct clk_fixed_def { struct clknode_init_def clkdef; uint64_t freq; uint32_t mult; uint32_t div; int fixed_flags; }; int clknode_fixed_register(struct clkdom *clkdom, struct clk_fixed_def *clkdef); -#endif /*_DEV_EXTRES_CLK_FIXED_H_*/ +#endif /*_DEV_CLK_FIXED_H_*/ diff --git a/sys/dev/extres/clk/clk_gate.c b/sys/dev/clk/clk_gate.c similarity index 99% rename from sys/dev/extres/clk/clk_gate.c rename to sys/dev/clk/clk_gate.c index 1ed9987c0b33..59c65d0015fe 100644 --- a/sys/dev/extres/clk/clk_gate.c +++ b/sys/dev/clk/clk_gate.c @@ -1,138 +1,138 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int clknode_gate_init(struct clknode *clk, device_t dev); static int clknode_gate_set_gate(struct clknode *clk, bool enable); static int clknode_gate_get_gate(struct clknode *clk, bool *enable); struct clknode_gate_sc { uint32_t offset; uint32_t shift; uint32_t mask; uint32_t on_value; uint32_t off_value; int gate_flags; }; static clknode_method_t clknode_gate_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, clknode_gate_init), CLKNODEMETHOD(clknode_set_gate, clknode_gate_set_gate), CLKNODEMETHOD(clknode_get_gate, clknode_gate_get_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(clknode_gate, clknode_gate_class, clknode_gate_methods, sizeof(struct clknode_gate_sc), clknode_class); static int clknode_gate_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return(0); } static int clknode_gate_set_gate(struct clknode *clk, bool enable) { uint32_t reg; struct clknode_gate_sc *sc; int rv; sc = clknode_get_softc(clk); 
DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, sc->mask << sc->shift, (enable ? sc->on_value : sc->off_value) << sc->shift); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); return(0); } static int clknode_gate_get_gate(struct clknode *clk, bool *enabled) { uint32_t reg; struct clknode_gate_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); if (rv != 0) return (rv); reg = (reg >> sc->shift) & sc->mask; *enabled = reg == sc->on_value; return(0); } int clknode_gate_register(struct clkdom *clkdom, struct clk_gate_def *clkdef) { struct clknode *clk; struct clknode_gate_sc *sc; clk = clknode_create(clkdom, &clknode_gate_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = clkdef->mask; sc->on_value = clkdef->on_value; sc->off_value = clkdef->off_value; sc->gate_flags = clkdef->gate_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/extres/clk/clk_gate.h b/sys/dev/clk/clk_gate.h similarity index 92% rename from sys/dev/extres/clk/clk_gate.h rename to sys/dev/clk/clk_gate.h index 2e9a5969d3e6..ea8de7380227 100644 --- a/sys/dev/extres/clk/clk_gate.h +++ b/sys/dev/clk/clk_gate.h @@ -1,44 +1,44 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#ifndef _DEV_EXTRES_CLK_GATE_H_ -#define _DEV_EXTRES_CLK_GATE_H_ +#ifndef _DEV_CLK_GATE_H_ +#define _DEV_CLK_GATE_H_ -#include +#include struct clk_gate_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t mask; uint32_t on_value; uint32_t off_value; int gate_flags; }; int clknode_gate_register(struct clkdom *clkdom, struct clk_gate_def *clkdef); -#endif /* _DEV_EXTRES_CLK_GATE_H_ */ +#endif /* _DEV_CLK_GATE_H_ */ diff --git a/sys/dev/extres/clk/clk_link.c b/sys/dev/clk/clk_link.c similarity index 99% rename from sys/dev/extres/clk/clk_link.c rename to sys/dev/clk/clk_link.c index dc82a093bab9..14debc48cdac 100644 --- a/sys/dev/extres/clk/clk_link.c +++ b/sys/dev/clk/clk_link.c @@ -1,117 +1,117 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include static int clknode_link_init(struct clknode *clk, device_t dev); static int clknode_link_recalc(struct clknode *clk, uint64_t *freq); static int clknode_link_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop); static int clknode_link_set_mux(struct clknode *clk, int idx); static int clknode_link_set_gate(struct clknode *clk, bool enable); static clknode_method_t clknode_link_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, clknode_link_init), CLKNODEMETHOD(clknode_recalc_freq, clknode_link_recalc), CLKNODEMETHOD(clknode_set_freq, clknode_link_set_freq), CLKNODEMETHOD(clknode_set_gate, clknode_link_set_gate), CLKNODEMETHOD(clknode_set_mux, clknode_link_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(clknode_link, clknode_link_class, clknode_link_methods, 0, clknode_class); static int clknode_link_init(struct clknode *clk, device_t dev) { return(0); } 
static int clknode_link_recalc(struct clknode *clk, uint64_t *freq) { printf("%s: Attempt to use unresolved linked clock: %s\n", __func__, clknode_get_name(clk)); return (EBADF); } static int clknode_link_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { printf("%s: Attempt to use unresolved linked clock: %s\n", __func__, clknode_get_name(clk)); return (EBADF); } static int clknode_link_set_mux(struct clknode *clk, int idx) { printf("%s: Attempt to use unresolved linked clock: %s\n", __func__, clknode_get_name(clk)); return (EBADF); } static int clknode_link_set_gate(struct clknode *clk, bool enable) { printf("%s: Attempt to use unresolved linked clock: %s\n", __func__, clknode_get_name(clk)); return (EBADF); } int clknode_link_register(struct clkdom *clkdom, struct clk_link_def *clkdef) { struct clknode *clk; struct clknode_init_def tmp; tmp = clkdef->clkdef; tmp.flags |= CLK_NODE_LINKED; clk = clknode_create(clkdom, &clknode_link_class, &tmp); if (clk == NULL) return (1); clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/extres/clk/clk_link.h b/sys/dev/clk/clk_link.h similarity index 92% rename from sys/dev/extres/clk/clk_link.h rename to sys/dev/clk/clk_link.h index 1591e8166794..9610f842abcf 100644 --- a/sys/dev/extres/clk/clk_link.h +++ b/sys/dev/clk/clk_link.h @@ -1,45 +1,45 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#ifndef _DEV_EXTRES_CLK_LINK_H_ -#define _DEV_EXTRES_CLK_LINK_H_ +#ifndef _DEV_CLK_LINK_H_ +#define _DEV_CLK_LINK_H_ -#include +#include /* * A linked clock is used as placeholder for not yet available clock. * It will be replaced by equally named clock from other domain, created * in future stage of system initialization. */ struct clk_link_def { struct clknode_init_def clkdef; }; int clknode_link_register(struct clkdom *clkdom, struct clk_link_def *clkdef); -#endif /*_DEV_EXTRES_CLK_LINK_H_*/ +#endif /*_DEV_CLK_LINK_H_*/ diff --git a/sys/dev/extres/clk/clk_mux.c b/sys/dev/clk/clk_mux.c similarity index 99% rename from sys/dev/extres/clk/clk_mux.c rename to sys/dev/clk/clk_mux.c index bfdf2ba028f0..279adddae42c 100644 --- a/sys/dev/extres/clk/clk_mux.c +++ b/sys/dev/clk/clk_mux.c @@ -1,130 +1,130 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int clknode_mux_init(struct clknode *clk, device_t dev); static int clknode_mux_set_mux(struct clknode *clk, int idx); struct clknode_mux_sc { uint32_t offset; uint32_t shift; uint32_t mask; int mux_flags; }; static clknode_method_t clknode_mux_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, clknode_mux_init), CLKNODEMETHOD(clknode_set_mux, clknode_mux_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(clknode_mux, clknode_mux_class, clknode_mux_methods, sizeof(struct clknode_mux_sc), clknode_class); 
static int clknode_mux_init(struct clknode *clk, device_t dev) { uint32_t reg; struct clknode_mux_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); if (rv != 0) { return (rv); } reg = (reg >> sc->shift) & sc->mask; clknode_init_parent_idx(clk, reg); return(0); } static int clknode_mux_set_mux(struct clknode *clk, int idx) { uint32_t reg; struct clknode_mux_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, sc->mask << sc->shift, (idx & sc->mask) << sc->shift); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); return(0); } int clknode_mux_register(struct clkdom *clkdom, struct clk_mux_def *clkdef) { struct clknode *clk; struct clknode_mux_sc *sc; clk = clknode_create(clkdom, &clknode_mux_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = (1 << clkdef->width) - 1; sc->mux_flags = clkdef->mux_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/extres/clk/clk_mux.h b/sys/dev/clk/clk_mux.h similarity index 92% rename from sys/dev/extres/clk/clk_mux.h rename to sys/dev/clk/clk_mux.h index ff5bbaa54b20..4bfa2c829b4a 100644 --- a/sys/dev/extres/clk/clk_mux.h +++ b/sys/dev/clk/clk_mux.h @@ -1,41 +1,42 @@ /*- * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#ifndef _DEV_EXTRESF_CLK_MUX_H_ -#define _DEV_EXTRESF_CLK_MUX_H_ -#include +#ifndef _DEV_CLK_MUX_H_ +#define _DEV_CLK_MUX_H_ + +#include struct clk_mux_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t width; int mux_flags; }; int clknode_mux_register(struct clkdom *clkdom, struct clk_mux_def *clkdef); -#endif /* _DEV_EXTRESF_CLK_MUX_H_ */ +#endif /* _DEV_CLK_MUX_H_ */ diff --git a/sys/dev/extres/clk/clkdev_if.m b/sys/dev/clk/clkdev_if.m similarity index 100% rename from sys/dev/extres/clk/clkdev_if.m rename to sys/dev/clk/clkdev_if.m diff --git a/sys/dev/extres/clk/clknode_if.m b/sys/dev/clk/clknode_if.m similarity index 100% rename from sys/dev/extres/clk/clknode_if.m rename to sys/dev/clk/clknode_if.m diff --git a/sys/dev/clk/rockchip/rk3288_cru.c b/sys/dev/clk/rockchip/rk3288_cru.c index 354a293ff7f2..9c8ff18ce01c 100644 --- a/sys/dev/clk/rockchip/rk3288_cru.c +++ b/sys/dev/clk/rockchip/rk3288_cru.c @@ -1,919 +1,919 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #define CRU_SOFTRST_SIZE 12 #define CRU_APLL_CON(x) (0x000 + (x) * 0x4) #define CRU_DPLL_CON(x) (0x010 + (x) * 0x4) #define CRU_CPLL_CON(x) (0x020 + (x) * 0x4) #define CRU_GPLL_CON(x) (0x030 + (x) * 0x4) #define CRU_NPLL_CON(x) (0x040 + (x) * 0x4) #define CRU_MODE_CON 0x050 #define CRU_CLKSEL_CON(x) (0x060 + (x) * 0x4) #define CRU_CLKGATE_CON(x) (0x160 + (x) * 0x4) #define CRU_GLB_SRST_FST_VALUE 0x1b0 #define CRU_GLB_SRST_SND_VALUE 0x1b4 #define CRU_SOFTRST_CON(x) (0x1b8 + (x) * 0x4) #define CRU_MISC_CON 0x1e8 #define CRU_GLB_CNT_TH 0x1ec #define CRU_GLB_RST_CON 0x1f0 #define CRU_GLB_RST_ST 0x1f8 #define CRU_SDMMC_CON0 0x200 #define CRU_SDMMC_CON1 0x204 #define CRU_SDIO0_CON0 0x208 #define CRU_SDIO0_CON1 0x20c #define CRU_SDIO1_CON0 0x210 #define CRU_SDIO1_CON1 0x214 #define CRU_EMMC_CON0 0x218 #define CRU_EMMC_CON1 0x21c /* GATES */ #define GATE(_idx, _clkname, _pname, _o, _s) \ { \ .id = _idx, \ .name = _clkname, \ .parent_name = _pname, \ .offset = CRU_CLKGATE_CON(_o), \ .shift = _s, \ } static struct rk_cru_gate rk3288_gates[] = { /* CRU_CLKGATE_CON0 */ GATE(0, "sclk_acc_efuse", "xin24m", 0, 12), GATE(0, "cpll_aclk_cpu", "cpll", 0, 11), GATE(0, "gpll_aclk_cpu", "gpll", 0, 10), GATE(0, "gpll_ddr", "gpll", 0, 9), GATE(0, "dpll_ddr", "dpll", 0, 8), GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0, 7), GATE(PCLK_CPU, "pclk_cpu", "pclk_cpu_s", 0, 5), GATE(HCLK_CPU, "hclk_cpu", "hclk_cpu_s", 0, 4), GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0, 3), GATE(0, "gpll_core", "gpll", 0, 2), GATE(0, "apll_core", "apll", 0, 1), /* CRU_CLKGATE_CON1 */ GATE(0, "uart3_frac", "uart3_frac_s", 1, 15), GATE(0, "uart3_src", "uart3_src_s", 1, 14), GATE(0, "uart2_frac", "uart2_frac_s", 1, 13), GATE(0, "uart2_src", "uart2_src_s", 1, 12), GATE(0, "uart1_frac", "uart1_frac_s", 1, 11), GATE(0, "uart1_src", 
"uart1_src_s", 1, 10), GATE(0, "uart0_frac", "uart0_frac_s", 1, 9), GATE(0, "uart0_src", "uart0_src_s", 1, 8), GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 1, 5), GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 1, 4), GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 1, 3), GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 1, 2), GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 1, 1), GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 1, 0), /* CRU_CLKGATE_CON2 */ GATE(0, "uart4_frac", "uart4_frac_s", 2, 13), GATE(0, "uart4_src", "uart4_src_s", 2, 12), GATE(SCLK_SPI2, "sclk_spi2", "sclk_spi2_s", 2, 11), GATE(SCLK_SPI1, "sclk_spi1", "sclk_spi1_s", 2, 10), GATE(SCLK_SPI0, "sclk_spi0", "sclk_spi0_s", 2, 9), GATE(SCLK_SARADC, "sclk_saradc", "sclk_saradc_s", 2, 8), GATE(SCLK_TSADC, "sclk_tsadc", "sclk_tsadc_s", 2, 7), GATE(0, "hsadc_src", "hsadc_src_s", 2, 6), GATE(0, "mac_pll_src", "mac_pll_src_s", 2, 5), GATE(PCLK_PERI, "pclk_peri", "pclk_peri_s", 2, 3), GATE(HCLK_PERI, "hclk_peri", "hclk_peri_s", 2, 2), GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 2, 1), GATE(0, "aclk_peri_src", "aclk_peri_src_s", 2, 0), /* CRU_CLKGATE_CON3 */ GATE(SCLK_ISP_JPE, "sclk_isp_jpe", "sclk_isp_jpe_s", 3, 15), GATE(SCLK_ISP, "sclk_isp", "sclk_isp_s", 3, 14), GATE(SCLK_EDP, "sclk_edp", "sclk_edp_s", 3, 13), GATE(SCLK_EDP_24M, "sclk_edp_24m", "sclk_edp_24m_s", 3, 12), GATE(0, "aclk_vdpu", "aclk_vdpu_s", 3, 11), GATE(0, "hclk_vcodec_pre", "hclk_vcodec_pre_s", 3, 10), GATE(0, "aclk_vepu", "aclk_vepu_s", 3, 9), GATE(0, "vip_src", "vip_src_s", 3, 7), /* 6 - Not in TRM, sclk_hsicphy480m in Linux */ GATE(0, "aclk_rga_pre", "aclk_rga_pre_s", 3, 5), GATE(SCLK_RGA, "sclk_rga", "sclk_rga_s", 3, 4), GATE(DCLK_VOP1, "dclk_vop1", "dclk_vop1_s", 3, 3), GATE(0, "aclk_vio1", "aclk_vio1_s", 3, 2), GATE(DCLK_VOP0, "dclk_vop0", "dclk_vop0_s", 3, 1), GATE(0, "aclk_vio0", "aclk_vio0_s", 3, 0), /* CRU_CLKGATE_CON4 */ /* 15 - Test clock generator */ GATE(0, "jtag", "ext_jtag", 4, 14), GATE(0, "sclk_ddrphy1", "ddrphy", 4, 13), GATE(0, 
"sclk_ddrphy0", "ddrphy", 4, 12), GATE(0, "sclk_tspout", "sclk_tspout_s", 4, 11), GATE(0, "sclk_tsp", "sclk_tsp_s", 4, 10), GATE(SCLK_SPDIF8CH, "sclk_spdif_8ch", "spdif_8ch_mux", 4, 9), GATE(0, "spdif_8ch_frac", "spdif_8ch_frac_s", 4, 8), GATE(0, "spdif_8ch_pre", "spdif_8ch_pre_s", 4, 7), GATE(SCLK_SPDIF, "sclk_spdif", "spdif_mux", 4, 6), GATE(0, "spdif_frac", "spdif_frac_s", 4, 5), GATE(0, "spdif_pre", "spdif_pre_s", 4, 4), GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", 4, 3), GATE(0, "i2s_frac", "i2s_frac_s", 4, 2), GATE(0, "i2s_src", "i2s_src_s", 4, 1), GATE(SCLK_I2S0_OUT, "i2s0_clkout", "i2s0_clkout_s", 4, 1), /* CRU_CLKGATE_CON5 */ GATE(SCLK_MIPIDSI_24M, "sclk_mipidsi_24m", "xin24m", 5, 15), GATE(SCLK_USBPHY480M_SRC, "usbphy480m_src", "usbphy480m_src_s", 5, 14), GATE(SCLK_PS2C, "sclk_ps2c", "xin24m", 5, 13), GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 5, 12), GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 5, 11), GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 5, 10), GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 5, 9), GATE(0, "pclk_pd_pmu", "pclk_pd_pmu_s", 5, 8), GATE(SCLK_GPU, "sclk_gpu", "sclk_gpu_s", 5, 7), GATE(SCLK_NANDC1, "sclk_nandc1", "sclk_nandc1_s", 5, 6), GATE(SCLK_NANDC0, "sclk_nandc0", "sclk_nandc0_s", 5, 5), GATE(SCLK_CRYPTO, "crypto", "crypto_s", 5, 4), GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 5, 3), GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 5, 2), GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 5, 1), GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 5, 0), /* CRU_CLKGATE_CON6 */ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 6, 15), GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 6, 14), GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 6, 13), GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 6, 12), GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 6, 11), GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 6, 9), GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 6, 8), GATE(PCLK_PS2C, "pclk_ps2c", "pclk_peri", 6, 7), GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 6, 6), 
GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 6, 5), GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 6, 4), GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 6, 3), GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 6, 2), GATE(0, "pclk_peri_matrix", "pclk_peri", 6, 1), GATE(0, "hclk_peri_matrix", "hclk_peri", 6, 0), /* CRU_CLKGATE_CON7 */ GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 7, 15), GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 7, 14), GATE(0, "hclk_mem", "hclk_peri", 7, 13), GATE(0, "hclk_emem", "hclk_peri", 7, 12), GATE(0, "aclk_peri_niu", "aclk_peri", 7, 11), GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 7, 10), GATE(0, "hclk_usb_peri", "hclk_peri", 7, 9), /* 8 - Not in TRM - hclk_hsic in Linux */ GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 7, 7), GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 7, 6), GATE(0, "pmu_hclk_otg0", "hclk_peri", 7, 5), GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 7, 4), GATE(PCLK_SIM, "pclk_sim", "pclk_peri", 7, 3), GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 7, 2), GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 7, 1), GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 7, 0), /* CRU_CLKGATE_CON8 */ GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 8, 12), /* 11 - 9 27m_tsp, hsadc_1_tsp, hsadc_1_tsp */ GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 8, 8), GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 8, 7), GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 8, 6), GATE(HCLK_SDIO1, "hclk_sdio1", "hclk_peri", 8, 5), GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 8, 4), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 8, 3), GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 8, 2), GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 8, 1), GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 8, 0), /* CRU_CLKGATE_CON9 */ GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 9, 1), GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 9, 0), /* CRU_CLKGATE_CON10 */ GATE(PCLK_PUBL0, "pclk_publ0", "pclk_cpu", 10, 15), GATE(PCLK_DDRUPCTL0, "pclk_ddrupctl0", "pclk_cpu", 10, 14), GATE(0, "aclk_strc_sys", "aclk_cpu", 10, 13), GATE(ACLK_DMAC1, 
"aclk_dmac1", "aclk_cpu", 10, 12), GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 10, 11), GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 10, 10), GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 10, 9), GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 10, 8), GATE(0, "sclk_intmem2", "aclk_cpu", 10, 7), GATE(0, "sclk_intmem1", "aclk_cpu", 10, 6), GATE(0, "sclk_intmem0", "aclk_cpu", 10, 5), GATE(0, "aclk_intmem", "aclk_cpu", 10, 4), GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 10, 3), GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 10, 2), GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 10, 1), GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 10, 0), /* CRU_CLKGATE_CON11 */ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 11, 11), GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 11, 10), GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 11, 9), GATE(0, "aclk_ccp", "aclk_cpu", 11, 8), GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 11, 7), GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 11, 6), GATE(0, "nclk_ddrupctl1", "ddrphy", 11, 5), GATE(0, "nclk_ddrupctl0", "ddrphy", 11, 4), GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 11, 3), GATE(PCLK_EFUSE1024, "pclk_efuse_1024", "pclk_cpu", 11, 2), GATE(PCLK_PUBL1, "pclk_publ1", "pclk_cpu", 11, 1), GATE(PCLK_DDRUPCTL1, "pclk_ddrupctl1", "pclk_cpu", 11, 0), /* CRU_CLKGATE_CON12 */ GATE(0, "pclk_core_niu", "pclk_dbg_pre", 12, 11), GATE(0, "cs_dbg", "pclk_dbg_pre", 12, 10), GATE(0, "pclk_dbg", "pclk_dbg_pre", 12, 9), GATE(0, "armcore0", "armcore0_s", 12, 8), GATE(0, "armcore1", "armcore1_s", 12, 7), GATE(0, "armcore2", "armcore2_s", 12, 6), GATE(0, "armcore3", "armcore3_s", 12, 5), GATE(0, "l2ram", "l2ram_s", 12, 4), GATE(0, "aclk_core_m0", "aclk_core_m0_s", 12, 3), GATE(0, "aclk_core_mp", "aclk_core_mp_s", 12, 2), GATE(0, "atclk", "atclk_s", 12, 1), GATE(0, "pclk_dbg_pre", "pclk_dbg_pre_s", 12, 0), /* CRU_CLKGATE_CON13 */ GATE(SCLK_HEVC_CORE, "sclk_hevc_core", "sclk_hevc_core_s", 13, 15), GATE(SCLK_HEVC_CABAC, "sclk_hevc_cabac", "sclk_hevc_cabac_s", 13, 14), GATE(ACLK_HEVC, "aclk_hevc", 
"aclk_hevc_s", 13, 13), GATE(0, "wii", "wifi_frac_s", 13, 12), GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 13, 11), GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 13, 10), /* 9 - Not in TRM - hsicphy12m_xin12m in Linux */ GATE(0, "c2c_host", "aclk_cpu_src", 13, 8), GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 13, 7), GATE(SCLK_OTGPHY2, "sclk_otgphy2", "xin24m", 13, 6), GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", 13, 5), GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", 13, 4), GATE(SCLK_EMMC, "sclk_emmc", "sclk_emmc_s", 13, 3), GATE(SCLK_SDIO1, "sclk_sdio1", "sclk_sdio1_s", 13, 2), GATE(SCLK_SDIO0, "sclk_sdio0", "sclk_sdio0_s", 13, 1), GATE(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_s", 13, 0), /* CRU_CLKGATE_CON14 */ GATE(0, "pclk_alive_niu", "pclk_pd_alive", 14, 12), GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 14, 11), GATE(PCLK_GPIO8, "pclk_gpio8", "pclk_pd_alive", 14, 8), GATE(PCLK_GPIO7, "pclk_gpio7", "pclk_pd_alive", 14, 7), GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 14, 6), GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 14, 5), GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 14, 4), GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 14, 3), GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 14, 2), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 14, 1), /* CRU_CLKGATE_CON15*/ GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 15, 15), GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 15, 14), GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 15, 13), GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 15, 12), GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 15, 11), GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 15, 10), GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio",15, 9), GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 15, 8), GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 15, 7), GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 15, 6), GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 15, 5), /* 4 - aclk_lcdc_iep */ GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 15, 3), GATE(ACLK_IEP, "aclk_iep", 
"aclk_vio0", 15, 2), GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 15, 1), GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 15, 0), /* CRU_CLKGATE_CON16 */ GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", 16, 11), GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", 16, 10), GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 16, 9), GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 16, 8), GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 16, 7), GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 16, 6), GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 16, 5), GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 16, 4), GATE(PCLK_ISP_IN, "pclk_isp_in", "ext_isp", 16, 3), GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 16, 2), GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 16, 1), GATE(0, "pclk_vip_in", "ext_vip", 16, 0), /* CRU_CLKGATE_CON17 */ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 17, 4), GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 17, 3), GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 17, 2), GATE(0, "pclk_intmem1", "pclk_pd_pmu", 17, 1), GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 17, 0), /* CRU_CLKGATE_CON18 */ GATE(ACLK_GPU, "aclk_gpu", "sclk_gpu", 18, 0), }; /* * PLLs */ #define PLL_RATE_BA(_hz, _ref, _fb, _post, _ba) \ { \ .freq = _hz, \ .refdiv = _ref, \ .fbdiv = _fb, \ .postdiv1 = _post, \ .bwadj = _ba, \ } #define PLL_RATE(_mhz, _ref, _fb, _post) \ PLL_RATE_BA(_mhz, _ref, _fb, _post, ((_fb < 2) ? 
1 : _fb >> 1)) static struct rk_clk_pll_rate rk3288_pll_rates[] = { PLL_RATE( 2208000000, 1, 92, 1), PLL_RATE( 2184000000, 1, 91, 1), PLL_RATE( 2160000000, 1, 90, 1), PLL_RATE( 2136000000, 1, 89, 1), PLL_RATE( 2112000000, 1, 88, 1), PLL_RATE( 2088000000, 1, 87, 1), PLL_RATE( 2064000000, 1, 86, 1), PLL_RATE( 2040000000, 1, 85, 1), PLL_RATE( 2016000000, 1, 84, 1), PLL_RATE( 1992000000, 1, 83, 1), PLL_RATE( 1968000000, 1, 82, 1), PLL_RATE( 1944000000, 1, 81, 1), PLL_RATE( 1920000000, 1, 80, 1), PLL_RATE( 1896000000, 1, 79, 1), PLL_RATE( 1872000000, 1, 78, 1), PLL_RATE( 1848000000, 1, 77, 1), PLL_RATE( 1824000000, 1, 76, 1), PLL_RATE( 1800000000, 1, 75, 1), PLL_RATE( 1776000000, 1, 74, 1), PLL_RATE( 1752000000, 1, 73, 1), PLL_RATE( 1728000000, 1, 72, 1), PLL_RATE( 1704000000, 1, 71, 1), PLL_RATE( 1680000000, 1, 70, 1), PLL_RATE( 1656000000, 1, 69, 1), PLL_RATE( 1632000000, 1, 68, 1), PLL_RATE( 1608000000, 1, 67, 1), PLL_RATE( 1560000000, 1, 65, 1), PLL_RATE( 1512000000, 1, 63, 1), PLL_RATE( 1488000000, 1, 62, 1), PLL_RATE( 1464000000, 1, 61, 1), PLL_RATE( 1440000000, 1, 60, 1), PLL_RATE( 1416000000, 1, 59, 1), PLL_RATE( 1392000000, 1, 58, 1), PLL_RATE( 1368000000, 1, 57, 1), PLL_RATE( 1344000000, 1, 56, 1), PLL_RATE( 1320000000, 1, 55, 1), PLL_RATE( 1296000000, 1, 54, 1), PLL_RATE( 1272000000, 1, 53, 1), PLL_RATE( 1248000000, 1, 52, 1), PLL_RATE( 1224000000, 1, 51, 1), PLL_RATE( 1200000000, 1, 50, 1), PLL_RATE( 1188000000, 2, 99, 1), PLL_RATE( 1176000000, 1, 49, 1), PLL_RATE( 1128000000, 1, 47, 1), PLL_RATE( 1104000000, 1, 46, 1), PLL_RATE( 1008000000, 1, 84, 2), PLL_RATE( 912000000, 1, 76, 2), PLL_RATE( 891000000, 8, 594, 2), PLL_RATE( 888000000, 1, 74, 2), PLL_RATE( 816000000, 1, 68, 2), PLL_RATE( 798000000, 2, 133, 2), PLL_RATE( 792000000, 1, 66, 2), PLL_RATE( 768000000, 1, 64, 2), PLL_RATE( 742500000, 8, 495, 2), PLL_RATE( 696000000, 1, 58, 2), PLL_RATE_BA( 621000000, 1, 207, 8, 1), PLL_RATE( 600000000, 1, 50, 2), PLL_RATE_BA( 594000000, 1, 198, 8, 1), PLL_RATE( 
552000000, 1, 46, 2), PLL_RATE( 504000000, 1, 84, 4), PLL_RATE( 500000000, 3, 125, 2), PLL_RATE( 456000000, 1, 76, 4), PLL_RATE( 428000000, 1, 107, 6), PLL_RATE( 408000000, 1, 68, 4), PLL_RATE( 400000000, 3, 100, 2), PLL_RATE_BA( 394000000, 1, 197, 12, 1), PLL_RATE( 384000000, 2, 128, 4), PLL_RATE( 360000000, 1, 60, 4), PLL_RATE_BA( 356000000, 1, 178, 12, 1), PLL_RATE_BA( 324000000, 1, 189, 14, 1), PLL_RATE( 312000000, 1, 52, 4), PLL_RATE_BA( 308000000, 1, 154, 12, 1), PLL_RATE_BA( 303000000, 1, 202, 16, 1), PLL_RATE( 300000000, 1, 75, 6), PLL_RATE_BA( 297750000, 2, 397, 16, 1), PLL_RATE_BA( 293250000, 2, 391, 16, 1), PLL_RATE_BA( 292500000, 1, 195, 16, 1), PLL_RATE( 273600000, 1, 114, 10), PLL_RATE_BA( 273000000, 1, 182, 16, 1), PLL_RATE_BA( 270000000, 1, 180, 16, 1), PLL_RATE_BA( 266250000, 2, 355, 16, 1), PLL_RATE_BA( 256500000, 1, 171, 16, 1), PLL_RATE( 252000000, 1, 84, 8), PLL_RATE_BA( 250500000, 1, 167, 16, 1), PLL_RATE_BA( 243428571, 1, 142, 14, 1), PLL_RATE( 238000000, 1, 119, 12), PLL_RATE_BA( 219750000, 2, 293, 16, 1), PLL_RATE_BA( 216000000, 1, 144, 16, 1), PLL_RATE_BA( 213000000, 1, 142, 16, 1), PLL_RATE( 195428571, 1, 114, 14), PLL_RATE( 160000000, 1, 80, 12), PLL_RATE( 157500000, 1, 105, 16), PLL_RATE( 126000000, 1, 84, 16), PLL_RATE( 48000000, 1, 64, 32), {}, }; static struct rk_clk_armclk_rates rk3288_armclk_rates[] = { { 1800000000, 1}, { 1704000000, 1}, { 1608000000, 1}, { 1512000000, 1}, { 1416000000, 1}, { 1200000000, 1}, { 1008000000, 1}, { 816000000, 1}, { 696000000, 1}, { 600000000, 1}, { 408000000, 1}, { 312000000, 1}, { 216000000, 1}, { 126000000, 1}, }; /* Standard PLL. 
*/ #define PLL(_id, _name, _base, _shift) \ { \ .type = RK3066_CLK_PLL, \ .clk.pll = &(struct rk_clk_pll_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = pll_src_p, \ .clkdef.parent_cnt = nitems(pll_src_p), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_offset = _base, \ .mode_reg = CRU_MODE_CON, \ .mode_shift = _shift, \ .rates = rk3288_pll_rates, \ }, \ } #define ARMDIV(_id, _name, _pn, _r, _o, _ds, _dw, _ms, _mw, _mp, _ap) \ { \ .type = RK_CLK_ARMCLK, \ .clk.armclk = &(struct rk_clk_armclk_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .muxdiv_offset = CRU_CLKSEL_CON(_o), \ .mux_shift = _ms, \ .mux_width = _mw, \ .div_shift = _ds, \ .div_width = _dw, \ .main_parent = _mp, \ .alt_parent = _ap, \ .rates = _r, \ .nrates = nitems(_r), \ }, \ } PLIST(pll_src_p) = {"xin24m", "xin24m", "xin32k"}; PLIST(armclk_p)= {"apll_core", "gpll_core"}; PLIST(ddrphy_p) = {"dpll_ddr", "gpll_ddr"}; PLIST(aclk_cpu_p) = {"cpll_aclk_cpu", "gpll_aclk_cpu"}; PLIST(cpll_gpll_p) = {"cpll", "gpll"}; PLIST(npll_cpll_gpll_p) = {"npll", "cpll", "gpll"}; PLIST(cpll_gpll_npll_p) = {"cpll", "gpll", "npll"}; PLIST(cpll_gpll_usb480m_p)= {"cpll", "gpll", "usbphy480m_src"}; PLIST(cpll_gpll_usb480m_npll_p) = {"cpll", "gpll", "usbphy480m_src", "npll"}; PLIST(mmc_p) = {"cpll", "gpll", "xin24m", "xin24m"}; PLIST(i2s_pre_p) = {"i2s_src", "i2s_frac", "ext_i2s", "xin12m"}; PLIST(i2s_clkout_p) = {"i2s_pre", "xin12m"}; PLIST(spdif_p) = {"spdif_pre", "spdif_frac", "xin12m"}; PLIST(spdif_8ch_p) = {"spdif_8ch_pre", "spdif_8ch_frac", "xin12m"}; PLIST(uart0_p) = {"uart0_src", "uart0_frac", "xin24m"}; PLIST(uart1_p) = {"uart1_src", "uart1_frac", "xin24m"}; PLIST(uart2_p) = {"uart2_src", "uart2_frac", "xin24m"}; PLIST(uart3_p) = {"uart3_src", "uart3_frac", "xin24m"}; PLIST(uart4_p) = {"uart4_src", "uart4_frac", "xin24m"}; PLIST(vip_out_p) = {"vip_src", "xin24m"}; 
/*
 * RK3288 CRU tail: remaining clock parent lists (PLIST), the composite
 * clock table (rk3288_clks), and the newbus probe/attach glue.
 * NOTE(review): this chunk is a collapsed patch extraction — original line
 * breaks were lost, so the C text is kept byte-for-byte; only comments
 * were added.  Table encodings follow the rk_cru macros: CDIV/MUX/COMP
 * take (id, name, parent(s), flags, CRU_CLKSELn_CON index, shift, width
 * [, mux shift, mux width]) — see rk_cru.h for the exact layouts.
 */
PLIST(mac_p) = {"mac_pll_src", "ext_gmac"}; PLIST(hsadcout_p) = {"hsadc_src", "ext_hsadc"}; PLIST(edp_24m_p) = {"ext_edp_24m", "xin24m"}; PLIST(tspout_p) = {"cpll", "gpll", "npll", "xin27m"}; PLIST(wifi_p) = {"cpll", "gpll"}; PLIST(usbphy480m_p) = {"sclk_otgphy1_480m", "sclk_otgphy2_480m", "sclk_otgphy0_480m"}; /* PLIST(aclk_vcodec_pre_p) = {"aclk_vepu", "aclk_vdpu"}; */ static struct rk_clk rk3288_clks[] = { /* External clocks */ LINK("xin24m"), FRATE(0, "xin32k", 32000), FRATE(0, "xin27m", 27000000), FRATE(0, "ext_hsadc", 0), FRATE(0, "ext_jtag", 0), FRATE(0, "ext_isp", 0), FRATE(0, "ext_vip", 0), FRATE(0, "ext_i2s", 0), FRATE(0, "ext_edp_24m", 0), FRATE(0, "sclk_otgphy0_480m", 0), FRATE(0, "sclk_otgphy1_480m", 0), FRATE(0, "sclk_otgphy2_480m", 0), FRATE(0, "aclk_vcodec_pre", 0), /* Fixed dividers */ FFACT(0, "xin12m", "xin24m", 1, 2), FFACT(0, "hclk_vcodec_pre_s", "aclk_vcodec_pre", 1, 4), PLL(PLL_APLL, "apll", CRU_APLL_CON(0), 0), PLL(PLL_DPLL, "dpll", CRU_DPLL_CON(0), 4), PLL(PLL_CPLL, "cpll", CRU_CPLL_CON(0), 8), PLL(PLL_GPLL, "gpll", CRU_GPLL_CON(0), 12), PLL(PLL_NPLL, "npll", CRU_NPLL_CON(0), 14), /* CRU_CLKSEL0_CON */ ARMDIV(ARMCLK, "armclk", armclk_p, rk3288_armclk_rates, 0, 8, 5, 15, 1, 0, 1), CDIV(0, "aclk_core_mp_s", "armclk", 0, 0, 4, 4), CDIV(0, "aclk_core_m0_s", "armclk", 0, 0, 0, 4), /* CRU_CLKSEL1_CON */ CDIV(0, "pclk_cpu_s", "aclk_cpu_pre", 0, 1, 12, 3), CDIV(0, "hclk_cpu_s", "aclk_cpu_pre", RK_CLK_COMPOSITE_DIV_EXP, 1, 8, 2), COMP(0, "aclk_cpu_src", aclk_cpu_p, 0, 1, 3, 5, 15, 1), CDIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0, 1, 0, 3), /* CRU_CLKSEL2_CON */ /* 12:8 testout_div */ CDIV(0, "sclk_tsadc_s", "xin32k", 0, 2, 0, 6), /* CRU_CLKSEL3_CON */ MUX(SCLK_UART4, "sclk_uart4", uart4_p, 0, 3, 8, 2), CDIV(0, "uart4_src_s", "uart_src", 0, 3, 0, 7), /* CRU_CLKSEL4_CON */ MUX(0, "i2s_pre", i2s_pre_p, 0, 4, 8, 2), MUX(0, "i2s0_clkout_s", i2s_clkout_p, 0, 4, 12, 1), COMP(0, "i2s_src_s", cpll_gpll_p, 0, 4, 0, 7, 15, 1), /* CRU_CLKSEL5_CON */ MUX(0,
"spdif_src", cpll_gpll_p, 0, 5, 15, 1), MUX(0, "spdif_mux", spdif_p, 0, 5, 8, 2), CDIV(0, "spdif_pre_s", "spdif_src", 0, 5, 0, 7), /* CRU_CLKSEL6_CON */ COMP(0, "sclk_isp_jpe_s", cpll_gpll_npll_p, 0, 6, 8, 6, 14, 2), COMP(0, "sclk_isp_s", cpll_gpll_npll_p, 0, 6, 0, 6, 6, 2), /* CRU_CLKSEL7_CON */ FRACT(0, "uart4_frac_s", "uart4_src", 0, 7), /* CRU_CLKSEL8_CON */ FRACT(0, "i2s_frac_s", "i2s_src", 0, 8), /* CRU_CLKSEL9_CON */ FRACT(0, "spdif_frac_s", "spdif_src", 0, 9), /* CRU_CLKSEL10_CON */ CDIV(0, "pclk_peri_s", "aclk_peri_src", RK_CLK_COMPOSITE_DIV_EXP, 10, 12, 2), CDIV(0, "hclk_peri_s", "aclk_peri_src", RK_CLK_COMPOSITE_DIV_EXP, 10, 8, 2), COMP(0, "aclk_peri_src_s", cpll_gpll_p, 0, 10, 0, 5, 15, 1), /* CRU_CLKSEL11_CON */ COMP(0, "sclk_sdmmc_s", mmc_p, 0, 11, 0, 6, 6, 2), /* CRU_CLKSEL12_CON */ COMP(0, "sclk_emmc_s", mmc_p, 0, 12, 8, 6, 14, 2), COMP(0, "sclk_sdio0_s", mmc_p, 0, 12, 0, 6, 6, 2), /* CRU_CLKSEL13_CON */ MUX(0, "uart_src", cpll_gpll_p, 0, 13, 15, 1), MUX(0, "usbphy480m_src_s", usbphy480m_p, 0, 13, 11, 2), MUX(SCLK_UART0, "sclk_uart0", uart0_p, 0, 13, 8, 2), COMP(0, "uart0_src_s", cpll_gpll_usb480m_npll_p, 0, 13, 0, 7, 13, 2), /* CRU_CLKSEL14_CON */ MUX(SCLK_UART1, "sclk_uart1", uart1_p, 0, 14, 8, 2), CDIV(0, "uart1_src_s", "uart_src", 0, 14, 0, 7), /* CRU_CLKSEL15_CON */ MUX(SCLK_UART2, "sclk_uart2", uart2_p, 0, 15, 8, 2), CDIV(0, "uart2_src_s", "uart_src", 0, 15, 0, 7), /* CRU_CLKSEL16_CON */ MUX(SCLK_UART3, "sclk_uart3", uart3_p, 0, 16, 8, 2), CDIV(0, "uart3_src_s", "uart_src", 0, 16, 0, 7), /* CRU_CLKSEL17_CON */ FRACT(0, "uart0_frac_s", "uart0_src", 0, 17), /* CRU_CLKSEL18_CON */ FRACT(0, "uart1_frac_s", "uart1_src", 0, 18), /* CRU_CLKSEL19_CON */ FRACT(0, "uart2_frac_s", "uart2_src", 0, 19), /* CRU_CLKSEL20_CON */ FRACT(0, "uart3_frac_s", "uart3_src", 0, 20), /* CRU_CLKSEL21_CON */ COMP(0, "mac_pll_src_s", npll_cpll_gpll_p, 0, 21, 8, 5, 0, 2), MUX(SCLK_MAC, "mac_clk", mac_p, 0, 21, 4, 1), /* CRU_CLKSEL22_CON */ MUX(0, "sclk_hsadc_out",
hsadcout_p, 0, 22, 4, 1), COMP(0, "hsadc_src_s", cpll_gpll_p, 0, 22, 8, 8, 0, 1), MUX(0, "wifi_src", wifi_p, 0, 22, 1, 1), /* 7 - inverter "sclk_hsadc", "sclk_hsadc_out" */ /* CRU_CLKSEL23_CON */ FRACT(0, "wifi_frac_s", "wifi_src", 0, 23), /* CRU_CLKSEL24_CON */ CDIV(0, "sclk_saradc_s", "xin24m", 0, 24, 8, 8), /* CRU_CLKSEL25_CON */ COMP(0, "sclk_spi1_s", cpll_gpll_p, 0, 25, 8, 7, 15, 1), COMP(0, "sclk_spi0_s", cpll_gpll_p, 0, 25, 0, 7, 7, 1), /* CRU_CLKSEL26_CON */ COMP(SCLK_VIP_OUT, "sclk_vip_out", vip_out_p, 0, 26, 9, 5, 15, 1), MUX(0, "vip_src_s", cpll_gpll_p, 0, 26, 8, 1), CDIV(0, "crypto_s", "aclk_cpu_pre", 0, 26, 6, 2), COMP(0, "ddrphy", ddrphy_p, RK_CLK_COMPOSITE_DIV_EXP, 26, 0, 2, 2, 1), /* CRU_CLKSEL27_CON */ COMP(0, "dclk_vop0_s", cpll_gpll_npll_p, 0, 27, 8, 8, 0, 2),
/* CRU_CLKSEL28_CON */
MUX(0, "sclk_edp_24m_s", edp_24m_p, 0, 28, 15, 1), CDIV(0, "hclk_vio", "aclk_vio0", 0, 28, 8, 5), COMP(0, "sclk_edp_s", cpll_gpll_npll_p, 0, 28, 0, 6, 6, 2), /* CRU_CLKSEL29_CON */ COMP(0, "dclk_vop1_s", cpll_gpll_npll_p, 0, 29, 8, 8, 6, 2), /* 4 - inverter "pclk_vip" "pclk_vip_in" */ /* 3 - inverter "pclk_isp", "pclk_isp_in" */ /* CRU_CLKSEL30_CON */ COMP(0, "sclk_rga_s", cpll_gpll_usb480m_p, 0, 30, 8, 5, 14, 2), COMP(0, "aclk_rga_pre_s", cpll_gpll_usb480m_p, 0, 30, 0, 5, 6, 2), /* CRU_CLKSEL31_CON */ COMP(0, "aclk_vio1_s", cpll_gpll_usb480m_p, 0, 31, 8, 5, 14, 2), COMP(0, "aclk_vio0_s", cpll_gpll_usb480m_p, 0, 31, 0, 5, 6, 2), /* CRU_CLKSEL32_CON */ COMP(0, "aclk_vdpu_s", cpll_gpll_usb480m_p, 0, 32, 8, 5, 14, 2), COMP(0, "aclk_vepu_s", cpll_gpll_usb480m_p, 0, 32, 0, 5, 6, 2), /* CRU_CLKSEL33_CON */ CDIV(0, "pclk_pd_alive", "gpll", 0, 33, 8, 5), CDIV(0, "pclk_pd_pmu_s", "gpll", 0, 33, 0, 5), /* CRU_CLKSEL34_CON */ COMP(0, "sclk_sdio1_s", mmc_p, 0, 34, 8, 6, 14, 2), COMP(0, "sclk_gpu_s", cpll_gpll_usb480m_npll_p, 0, 34, 0, 5, 6, 2), /* CRU_CLKSEL35_CON */ COMP(0, "sclk_tspout_s", tspout_p, 0, 35, 8, 5, 14, 2), COMP(0, "sclk_tsp_s", cpll_gpll_npll_p, 0, 35, 0, 5, 6, 2), /* CRU_CLKSEL36_CON
*/ CDIV(0, "armcore3_s", "armclk", 0, 36, 12, 3), CDIV(0, "armcore2_s", "armclk", 0, 36, 8, 3), CDIV(0, "armcore1_s", "armclk", 0, 36, 4, 3), CDIV(0, "armcore0_s", "armclk", 0, 36, 0, 3), /* CRU_CLKSEL37_CON */ CDIV(0, "pclk_dbg_pre_s", "armclk", 0, 37, 9, 5), CDIV(0, "atclk_s", "armclk", 0, 37, 4, 5), CDIV(0, "l2ram_s", "armclk", 0, 37, 0, 3), /* CRU_CLKSEL38_CON */ COMP(0, "sclk_nandc1_s", cpll_gpll_p, 0, 38, 8, 5, 15, 1), COMP(0, "sclk_nandc0_s", cpll_gpll_p, 0, 38, 0, 5, 7, 1), /* CRU_CLKSEL39_CON */ COMP(0, "aclk_hevc_s", cpll_gpll_npll_p, 0, 39, 8, 5, 14, 2), COMP(0, "sclk_spi2_s", cpll_gpll_p, 0, 39, 0, 7, 7, 1), /* CRU_CLKSEL40_CON */ CDIV(HCLK_HEVC, "hclk_hevc", "aclk_hevc", 0, 40, 12, 2), MUX(0, "spdif_8ch_mux", spdif_8ch_p, 0, 40, 8, 2), CDIV(0, "spdif_8ch_pre_s", "spdif_src", 0, 40, 0, 7), /* CRU_CLKSEL41_CON */ FRACT(0, "spdif_8ch_frac_s", "spdif_8ch_pre", 0, 41), /* CRU_CLKSEL42_CON */ COMP(0, "sclk_hevc_core_s", cpll_gpll_npll_p, 0, 42, 8, 5, 14, 2), COMP(0, "sclk_hevc_cabac_s", cpll_gpll_npll_p, 0, 42, 0, 5, 6, 2), /* * not yet implemented MMC clocks * id name src reg * SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3288_SDMMC_CON0 * SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, * SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3288_SDIO0_CON0, 1), * SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3288_SDIO0_CON1, 0), * SCLK_SDIO1_DRV, "sdio1_drv", "sclk_sdio1", RK3288_SDIO1_CON0, 1), * SCLK_SDIO1_SAMPLE, "sdio1_sample", "sclk_sdio1", RK3288_SDIO1_CON1, 0), * SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3288_EMMC_CON0, 1), * SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3288_EMMC_CON1, 0), * * and GFR based mux for "aclk_vcodec_pre" */ };
/* Probe: match the FDT node against "rockchip,rk3288-cru". */
static int rk3288_cru_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "rockchip,rk3288-cru")) { device_set_desc(dev, "Rockchip RK3288 Clock and Reset Unit"); return (BUS_PROBE_DEFAULT); } return (ENXIO); }
/*
 * Attach: fill in the shared rk_cru softc (gate table, clock table,
 * softreset geometry) and delegate everything else to rk_cru_attach().
 */
static int rk3288_cru_attach(device_t
dev) { struct rk_cru_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->gates = rk3288_gates; sc->ngates = nitems(rk3288_gates); sc->clks = rk3288_clks; sc->nclks = nitems(rk3288_clks); sc->reset_num = CRU_SOFTRST_SIZE * 16; sc->reset_offset = CRU_SOFTRST_CON(0); return (rk_cru_attach(dev)); } static device_method_t rk3288_cru_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk3288_cru_probe), DEVMETHOD(device_attach, rk3288_cru_attach), DEVMETHOD_END }; DEFINE_CLASS_1(rk3288_cru, rk3288_cru_driver, rk3288_cru_methods, sizeof(struct rk_cru_softc), rk_cru_driver); EARLY_DRIVER_MODULE(rk3288_cru, simplebus, rk3288_cru_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE + 1);
/*
 * NOTE(review): extraction artifact — the raw diff header for the next
 * file (sys/dev/clk/rockchip/rk3328_cru.c) follows inline; it is not C
 * and is preserved verbatim.
 */
diff --git a/sys/dev/clk/rockchip/rk3328_cru.c b/sys/dev/clk/rockchip/rk3328_cru.c index 6a9583250516..0edd562ddd08 100644 --- a/sys/dev/clk/rockchip/rk3328_cru.c +++ b/sys/dev/clk/rockchip/rk3328_cru.c @@ -1,1115 +1,1115 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018-2021 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */
/*
 * RK3328 Clock and Reset Unit driver (header portion).
 * NOTE(review): extraction artifact — the header names of the #include
 * directives below (and the stray diff -/+ markers) were stripped by the
 * patch extraction; recover them from the repository before compiling.
 * The code is otherwise preserved byte-for-byte with only comments added.
 */
#include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #define CRU_CLKSEL_CON(x) (0x100 + (x) * 0x4) #define CRU_CLKGATE_CON(x) (0x200 + (x) * 0x4) /* Registers */ #define RK3328_GRF_SOC_CON4 0x410 #define RK3328_GRF_MAC_CON1 0x904 #define RK3328_GRF_MAC_CON2 0x908 /* Exported clocks */ #define PLL_APLL 1 #define PLL_DPLL 2 #define PLL_CPLL 3 #define PLL_GPLL 4 #define PLL_NPLL 5 #define ARMCLK 6 /* SCLK */ #define SCLK_RTC32K 30 #define SCLK_SDMMC_EXT 31 #define SCLK_SPI 32 #define SCLK_SDMMC 33 #define SCLK_SDIO 34 #define SCLK_EMMC 35 #define SCLK_TSADC 36 #define SCLK_SARADC 37 #define SCLK_UART0 38 #define SCLK_UART1 39 #define SCLK_UART2 40 #define SCLK_I2S0 41 #define SCLK_I2S1 42 #define SCLK_I2S2 43 #define SCLK_I2S1_OUT 44 #define SCLK_I2S2_OUT 45 #define SCLK_SPDIF 46 #define SCLK_TIMER0 47 #define SCLK_TIMER1 48 #define SCLK_TIMER2 49 #define SCLK_TIMER3 50 #define SCLK_TIMER4 51 #define SCLK_TIMER5 52 #define SCLK_WIFI 53 #define SCLK_CIF_OUT 54 #define SCLK_I2C0 55 #define SCLK_I2C1 56 #define SCLK_I2C2 57 #define SCLK_I2C3 58 #define SCLK_CRYPTO 59 #define SCLK_PWM 60 #define SCLK_PDM 61 #define SCLK_EFUSE 62 #define SCLK_OTP 63 #define SCLK_DDRCLK 64 #define SCLK_VDEC_CABAC 65 #define SCLK_VDEC_CORE 66 #define SCLK_VENC_DSP 67 #define SCLK_VENC_CORE 68 #define SCLK_RGA 69 #define SCLK_HDMI_SFC 70 #define SCLK_HDMI_CEC 71 /* Unused ?
*/ #define SCLK_USB3_REF 72 #define SCLK_USB3_SUSPEND 73 #define SCLK_SDMMC_DRV 74 #define SCLK_SDIO_DRV 75 #define SCLK_EMMC_DRV 76 #define SCLK_SDMMC_EXT_DRV 77 #define SCLK_SDMMC_SAMPLE 78 #define SCLK_SDIO_SAMPLE 79 #define SCLK_EMMC_SAMPLE 80 #define SCLK_SDMMC_EXT_SAMPLE 81 #define SCLK_VOP 82 #define SCLK_MAC2PHY_RXTX 83 #define SCLK_MAC2PHY_SRC 84 #define SCLK_MAC2PHY_REF 85 #define SCLK_MAC2PHY_OUT 86 #define SCLK_MAC2IO_RX 87 #define SCLK_MAC2IO_TX 88 #define SCLK_MAC2IO_REFOUT 89 #define SCLK_MAC2IO_REF 90 #define SCLK_MAC2IO_OUT 91 #define SCLK_TSP 92 #define SCLK_HSADC_TSP 93 #define SCLK_USB3PHY_REF 94 #define SCLK_REF_USB3OTG 95 #define SCLK_USB3OTG_REF 96 #define SCLK_USB3OTG_SUSPEND 97 #define SCLK_REF_USB3OTG_SRC 98 #define SCLK_MAC2IO_SRC 99 #define SCLK_MAC2IO 100 #define SCLK_MAC2PHY 101 #define SCLK_MAC2IO_EXT 102 /* DCLK */ #define DCLK_LCDC 120 #define DCLK_HDMIPHY 121 #define HDMIPHY 122 #define USB480M 123 #define DCLK_LCDC_SRC 124 /* ACLK */ #define ACLK_AXISRAM 130 /* Unused */ #define ACLK_VOP_PRE 131 #define ACLK_USB3OTG 132 #define ACLK_RGA_PRE 133 #define ACLK_DMAC 134 /* Unused */ #define ACLK_GPU 135 #define ACLK_BUS_PRE 136 #define ACLK_PERI_PRE 137 #define ACLK_RKVDEC_PRE 138 #define ACLK_RKVDEC 139 #define ACLK_RKVENC 140 #define ACLK_VPU_PRE 141 #define ACLK_VIO_PRE 142 #define ACLK_VPU 143 #define ACLK_VIO 144 #define ACLK_VOP 145 #define ACLK_GMAC 146 #define ACLK_H265 147 #define ACLK_H264 148 #define ACLK_MAC2PHY 149 #define ACLK_MAC2IO 150 #define ACLK_DCF 151 #define ACLK_TSP 152 #define ACLK_PERI 153 #define ACLK_RGA 154 #define ACLK_IEP 155 #define ACLK_CIF 156 #define ACLK_HDCP 157 /* PCLK */ #define PCLK_GPIO0 200 #define PCLK_GPIO1 201 #define PCLK_GPIO2 202 #define PCLK_GPIO3 203 #define PCLK_GRF 204 #define PCLK_I2C0 205 #define PCLK_I2C1 206 #define PCLK_I2C2 207 #define PCLK_I2C3 208 #define PCLK_SPI 209 #define PCLK_UART0 210 #define PCLK_UART1 211 #define PCLK_UART2 212 #define PCLK_TSADC 213 #define PCLK_PWM
214 #define PCLK_TIMER 215 #define PCLK_BUS_PRE 216 #define PCLK_PERI_PRE 217 /* Unused */ #define PCLK_HDMI_CTRL 218 /* Unused */ #define PCLK_HDMI_PHY 219 /* Unused */ #define PCLK_GMAC 220 #define PCLK_H265 221 #define PCLK_MAC2PHY 222 #define PCLK_MAC2IO 223 #define PCLK_USB3PHY_OTG 224 #define PCLK_USB3PHY_PIPE 225 #define PCLK_USB3_GRF 226 #define PCLK_USB2_GRF 227 #define PCLK_HDMIPHY 228 #define PCLK_DDR 229 #define PCLK_PERI 230 #define PCLK_HDMI 231 #define PCLK_HDCP 232 #define PCLK_DCF 233 #define PCLK_SARADC 234 #define PCLK_ACODECPHY 235 #define PCLK_WDT 236 /* Controlled from the secure GRF */ /* HCLK */ #define HCLK_PERI 308 #define HCLK_TSP 309 #define HCLK_GMAC 310 /* Unused */ #define HCLK_I2S0_8CH 311 #define HCLK_I2S1_8CH 312 #define HCLK_I2S2_2CH 313 #define HCLK_SPDIF_8CH 314 #define HCLK_VOP 315 #define HCLK_NANDC 316 /* Unused */ #define HCLK_SDMMC 317 #define HCLK_SDIO 318 #define HCLK_EMMC 319 #define HCLK_SDMMC_EXT 320 #define HCLK_RKVDEC_PRE 321 #define HCLK_RKVDEC 322 #define HCLK_RKVENC 323 #define HCLK_VPU_PRE 324 #define HCLK_VIO_PRE 325 #define HCLK_VPU 326 /* 327 doesn't exists */ #define HCLK_BUS_PRE 328 #define HCLK_PERI_PRE 329 /* Unused */ #define HCLK_H264 330 #define HCLK_CIF 331 #define HCLK_OTG_PMU 332 #define HCLK_OTG 333 #define HCLK_HOST0 334 #define HCLK_HOST0_ARB 335 #define HCLK_CRYPTO_MST 336 #define HCLK_CRYPTO_SLV 337 #define HCLK_PDM 338 #define HCLK_IEP 339 #define HCLK_RGA 340 #define HCLK_HDCP 341
/* Gate table: GATE(id, name, parent, CRU_CLKGATE_CONn index, bit). */
static struct rk_cru_gate rk3328_gates[] = { /* CRU_CLKGATE_CON0 */ GATE(0, "core_apll_clk", "apll", 0, 0), GATE(0, "core_dpll_clk", "dpll", 0, 1), GATE(0, "core_gpll_clk", "gpll", 0, 2), /* Bit 3 bus_src_clk_en */ /* Bit 4 clk_ddrphy_src_en */ /* Bit 5 clk_ddrpd_src_en */ /* Bit 6 clk_ddrmon_en */ /* Bit 7-8 unused */ /* Bit 9 testclk_en */ GATE(SCLK_WIFI, "sclk_wifi", "sclk_wifi_c", 0, 10), GATE(SCLK_RTC32K, "clk_rtc32k", "clk_rtc32k_c", 0, 11), GATE(0, "core_npll_clk", "npll", 0, 12), /* Bit 13-15 unused */ /*
CRU_CLKGATE_CON1 */ /* Bit 0 unused */ GATE(0, "clk_i2s0_div", "clk_i2s0_div_c", 1, 1), GATE(0, "clk_i2s0_frac", "clk_i2s0_frac_f", 1, 2), GATE(SCLK_I2S0, "clk_i2s0", "clk_i2s0_mux", 1, 3), GATE(0, "clk_i2s1_div", "clk_i2s1_div_c", 1, 4), GATE(0, "clk_i2s1_frac", "clk_i2s1_frac_f", 1, 5), GATE(SCLK_I2S1, "clk_i2s1", "clk_i2s1_mux", 1, 6), GATE(0, "clk_i2s1_out", "clk_i2s1_mux", 1, 7), GATE(0, "clk_i2s2_div", "clk_i2s2_div_c", 1, 8), GATE(0, "clk_i2s2_frac", "clk_i2s2_frac_f", 1, 9), GATE(SCLK_I2S2, "clk_i2s2", "clk_i2s2_mux", 1, 10), GATE(0, "clk_i2s2_out", "clk_i2s2_mux", 1, 11), GATE(0, "clk_spdif_div", "clk_spdif_div_c", 1, 12), GATE(0, "clk_spdif_frac", "clk_spdif_frac_f", 1, 13), GATE(0, "clk_uart0_div", "clk_uart0_div_c", 1, 14), GATE(0, "clk_uart0_frac", "clk_uart0_frac_f", 1, 15), /* CRU_CLKGATE_CON2 */ GATE(0, "clk_uart1_div", "clk_uart1_div_c", 2, 0), GATE(0, "clk_uart1_frac", "clk_uart1_frac_f", 2, 1), GATE(0, "clk_uart2_div", "clk_uart2_div_c", 2, 2), GATE(0, "clk_uart2_frac", "clk_uart2_frac_f", 2, 3), GATE(SCLK_CRYPTO, "clk_crypto", "clk_crypto_c", 2, 4), GATE(SCLK_TSP, "clk_tsp", "clk_tsp_c", 2, 5), GATE(SCLK_TSADC, "clk_tsadc_src", "clk_tsadc_c", 2, 6), GATE(SCLK_SPI, "clk_spi", "clk_spi_c", 2, 7), GATE(SCLK_PWM, "clk_pwm", "clk_pwm_c", 2, 8), GATE(SCLK_I2C0, "clk_i2c0_src", "clk_i2c0_c", 2, 9), GATE(SCLK_I2C1, "clk_i2c1_src", "clk_i2c1_c", 2, 10), GATE(SCLK_I2C2, "clk_i2c2_src", "clk_i2c2_c", 2, 11), GATE(SCLK_I2C3, "clk_i2c3_src", "clk_i2c3_c", 2, 12), GATE(SCLK_EFUSE, "clk_efuse", "clk_efuse_c", 2, 13), GATE(SCLK_SARADC, "clk_saradc", "clk_saradc_c", 2, 14), GATE(SCLK_PDM, "clk_pdm", "clk_pdm_c", 2, 15), /* CRU_CLKGATE_CON3 */ GATE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", "clk_mac2phy_src_c", 3, 0), GATE(SCLK_MAC2IO_SRC, "clk_mac2io_src", "clk_mac2io_src_c", 3, 1), GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_c", 3, 2), /* Bit 3 gmac_gpll_src_en Unused ? */ /* Bit 4 gmac_vpll_src_en Unused ?
*/ GATE(SCLK_MAC2IO_OUT, "clk_mac2io_out", "clk_mac2io_out_c", 3, 5), /* Bit 6-7 unused */ GATE(SCLK_OTP, "clk_otp", "clk_otp_c", 3, 8), /* Bit 9-15 unused */ /* CRU_CLKGATE_CON4 */ GATE(0, "periph_gclk_src", "gpll", 4, 0), GATE(0, "periph_cclk_src", "cpll", 4, 1), GATE(0, "hdmiphy_peri", "hdmiphy", 4, 2), GATE(SCLK_SDMMC, "clk_mmc0_src", "clk_sdmmc_c", 4, 3), GATE(SCLK_SDIO, "clk_sdio_src", "clk_sdio_c", 4, 4), GATE(SCLK_EMMC, "clk_emmc_src", "clk_emmc_c", 4, 5), GATE(SCLK_REF_USB3OTG_SRC, "clk_ref_usb3otg_src", "clk_ref_usb3otg_src_c", 4, 6), GATE(SCLK_USB3OTG_REF, "clk_usb3_otg0_ref", "xin24m", 4, 7), GATE(SCLK_USB3OTG_SUSPEND, "clk_usb3otg_suspend", "clk_usb3otg_suspend_c", 4, 8), /* Bit 9 clk_usb3phy_ref_25m_en */ GATE(SCLK_SDMMC_EXT, "clk_sdmmc_ext", "clk_sdmmc_ext_c", 4, 10), /* Bit 11-15 unused */ /* CRU_CLKGATE_CON5 */ GATE(ACLK_RGA_PRE, "aclk_rga_pre", "aclk_rga_pre_c", 5, 0), GATE(SCLK_RGA, "sclk_rga", "sclk_rga_c", 5, 0),
/* NOTE(review): "sclk_rga" uses the same (reg 5, bit 0) as "aclk_rga_pre" above; bit 1 looks intended — verify against the RK3328 TRM */
GATE(ACLK_VIO_PRE, "aclk_vio_pre", "aclk_vio_pre_c", 5, 2), GATE(SCLK_CIF_OUT, "clk_cif_src", "clk_cif_src_c", 5, 3), GATE(SCLK_HDMI_SFC, "clk_hdmi_sfc", "xin24m", 5, 4), GATE(ACLK_VOP_PRE, "aclk_vop_pre", "aclk_vop_pre_c", 5, 5), GATE(DCLK_LCDC_SRC, "vop_dclk_src", "vop_dclk_src_c", 5, 6), /* Bit 7-15 unused */ /* CRU_CLKGATE_CON6 */ GATE(ACLK_RKVDEC_PRE, "aclk_rkvdec_pre", "aclk_rkvdec_c", 6, 0), GATE(SCLK_VDEC_CABAC, "sclk_cabac", "sclk_cabac_c", 6, 1), GATE(SCLK_VDEC_CORE, "sclk_vdec_core", "sclk_vdec_core_c", 6, 2), GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_c", 6, 3), GATE(SCLK_VENC_CORE, "sclk_venc", "sclk_venc_c", 6, 4), GATE(ACLK_VPU_PRE, "aclk_vpu_pre", "aclk_vpu_pre_c", 6, 5), GATE(0, "aclk_gpu_pre", "aclk_gpu_pre_c", 6, 6), GATE(SCLK_VENC_DSP, "sclk_venc_dsp", "sclk_venc_dsp_c", 6, 7), /* Bit 8-15 unused */ /* CRU_CLKGATE_CON7 */ /* Bit 0 aclk_core_en */ /* Bit 1 clk_core_periph_en */ /* Bit 2 clk_jtag_en */ /* Bit 3 unused */ /* Bit 4 pclk_ddr_en */ /* Bit 5-15 unused */ /* CRU_CLKGATE_CON8 */ GATE(ACLK_BUS_PRE,
"aclk_bus_pre", "aclk_bus_pre_c", 8, 0), GATE(HCLK_BUS_PRE, "hclk_bus_pre", "hclk_bus_pre_c", 8, 1), GATE(PCLK_BUS_PRE, "pclk_bus_pre", "pclk_bus_pre_c", 8, 2), GATE(0, "pclk_bus", "pclk_bus_pre", 8, 3), GATE(0, "pclk_phy", "pclk_bus_pre", 8, 4), GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 8, 5), GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 8, 6), GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 8, 7), GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 8, 8), GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 8, 9), GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 8, 10), /* Bit 11-15 unused */ /* CRU_CLKGATE_CON9 */ GATE(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 9, 0), GATE(SCLK_MAC2PHY_RXTX, "clk_gmac2phy_rx", "clk_mac2phy", 9, 1), GATE(SCLK_MAC2PHY_OUT, "clk_mac2phy_out", "clk_mac2phy_out_c", 9, 2), GATE(SCLK_MAC2PHY_REF, "clk_gmac2phy_ref", "clk_mac2phy", 9, 3), GATE(SCLK_MAC2IO_RX, "clk_gmac2io_rx", "clk_mac2io", 9, 4), GATE(SCLK_MAC2IO_TX, "clk_gmac2io_tx", "clk_mac2io", 9, 5), GATE(SCLK_MAC2IO_REFOUT, "clk_gmac2io_refout", "clk_mac2io", 9, 6), GATE(SCLK_MAC2IO_REF, "clk_gmac2io_ref", "clk_mac2io", 9, 7), /* Bit 8-15 unused */ /* CRU_CLKGATE_CON10 */ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_pre", 10, 0), GATE(HCLK_PERI, "hclk_peri", "hclk_peri_c", 10, 1), GATE(PCLK_PERI, "pclk_peri", "pclk_peri_c", 10, 2), /* Bit 3-15 unused */ /* CRU_CLKGATE_CON11 */ GATE(HCLK_RKVDEC_PRE, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 11, 0), /* Bit 1-3 unused */ GATE(HCLK_RKVENC, "hclk_rkvenc", "aclk_rkvenc", 11, 4), /* Bit 5-7 unused */ GATE(HCLK_VPU_PRE, "hclk_vpu_pre", "aclk_vpu_pre", 11, 8), /* Bit 9-15 unused */ /* CRU_CLKGATE_CON12 */ /* unused */ /* CRU_CLKGATE_CON13 */ /* Bit 0 aclk_core_niu_en */ /* Bit 1 aclk_gic400_en */ /* Bit 2-15 unused */ /* CRU_CLKGATE_CON14 */ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 14, 0), GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", 14, 1), /* Bit 2-15 unused */ /* CRU_CLKGATE_CON15*/ /* Bit 0 aclk_intmem_en Unused */ /* Bit 1 aclk_dmac_bus_en Unused */ /* Bit 2 hclk_rom_en Unused */
GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_bus_pre", 15, 3), GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_bus_pre", 15, 4), GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_bus_pre", 15, 5), GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_bus_pre", 15, 6), GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 15, 7), GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 15, 8), GATE(0, "pclk_efuse", "pclk_bus", 15, 9), GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 15, 10), GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 15, 11), GATE(0, "aclk_bus_niu", "aclk_bus_pre", 15, 12), GATE(0, "hclk_bus_niu", "hclk_bus_pre", 15, 13), GATE(0, "pclk_bus_niu", "pclk_bus_pre", 15, 14), GATE(0, "pclk_phy_niu", "pclk_phy", 15, 14), /* Bit 15 pclk_phy_niu_en */
/* NOTE(review): "pclk_phy_niu" uses bit 14 (same as "pclk_bus_niu") while the adjacent comment says bit 15 — verify */
/* CRU_CLKGATE_CON16 */ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 16, 0), GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 16, 1), GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 16, 2), GATE(PCLK_TIMER, "pclk_timer0", "pclk_bus", 16, 3), GATE(0, "pclk_stimer", "pclk_bus", 16, 4), GATE(PCLK_SPI, "pclk_spi", "pclk_bus", 16, 5), GATE(PCLK_PWM, "pclk_pwm", "pclk_bus", 16, 6), GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_bus", 16, 7), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus", 16, 8), GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus", 16, 9), GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus", 16, 10), GATE(PCLK_UART0, "pclk_uart0", "pclk_bus", 16, 11), GATE(PCLK_UART1, "pclk_uart1", "pclk_bus", 16, 12), GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 16, 13), GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 16, 14), GATE(PCLK_DCF, "pclk_dcf", "pclk_bus", 16, 15), /* CRU_CLKGATE_CON17 */ GATE(PCLK_GRF, "pclk_grf", "pclk_bus", 17, 0), /* Bit 1 unused */ GATE(PCLK_USB3_GRF, "pclk_usb3grf", "pclk_phy", 17, 2), GATE(0, "pclk_ddrphy", "pclk_phy", 17, 3), GATE(0, "pclk_cru", "pclk_bus", 17, 4), GATE(PCLK_ACODECPHY, "pclk_acodecphy", "pclk_phy", 17, 5), GATE(0, "pclk_sgrf", "pclk_bus", 17, 6), GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_phy", 17, 7), GATE(0, "pclk_vdacphy", "pclk_bus", 17, 8), /* Bit 9
unused */ GATE(0, "pclk_sim", "pclk_bus", 17, 10), GATE(HCLK_TSP, "hclk_tsp", "hclk_bus_pre", 17, 11), GATE(ACLK_TSP, "aclk_tsp", "aclk_bus_pre", 17, 12), /* Bit 13 clk_hsadc_0_tsp_en Depend on a gpio clock ? */ GATE(PCLK_USB2_GRF, "pclk_usb2grf", "pclk_phy", 17, 14), GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus", 17, 15), /* CRU_CLKGATE_CON18 */ /* Bit 0 unused */ /* Bit 1 pclk_ddr_upctl_en */ /* Bit 2 pclk_ddr_msch_en */ /* Bit 3 pclk_ddr_mon_en */ /* Bit 4 aclk_ddr_upctl_en */ /* Bit 5 clk_ddr_upctl_en */ /* Bit 6 clk_ddr_msch_en */ /* Bit 7 pclk_ddrstdby_en */ /* Bit 8-15 unused */ /* CRU_CLKGATE_CON19 */ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 19, 0), GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 19, 1), GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 19, 2), /* Bit 3-5 unused */ GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 19, 6), GATE(HCLK_HOST0_ARB, "hclk_host0_arg", "hclk_peri", 19, 7), GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 19, 8), GATE(HCLK_OTG_PMU, "hclk_otg_pmu", "hclk_peri", 19, 9), /* Bit 10 unused */ GATE(0, "aclk_peri_niu", "aclk_peri", 19, 11), GATE(0, "hclk_peri_niu", "hclk_peri", 19, 12), GATE(0, "pclk_peri_niu", "hclk_peri", 19, 13), GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 19, 14), GATE(HCLK_SDMMC_EXT, "hclk_sdmmc_ext", "hclk_peri", 19, 15), /* CRU_CLKGATE_CON20 */ /* unused */ /* CRU_CLKGATE_CON21 */ /* Bit 0-1 unused */ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 21, 2), GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 21, 3), GATE(0, "aclk_vop_niu", "aclk_vop_pre", 21, 4), GATE(0, "hclk_vop_niu", "hclk_vio_pre", 21, 5), GATE(ACLK_IEP, "aclk_iep", "aclk_vio_pre", 21, 6), GATE(HCLK_IEP, "hclk_iep", "hclk_vio_pre", 21, 7), GATE(ACLK_CIF, "aclk_cif", "aclk_vio_pre", 21, 8), GATE(HCLK_CIF, "hclk_cif", "hclk_vio_pre", 21, 9), GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 21, 10), GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 21, 11), GATE(0, "hclk_ahb1tom", "hclk_vio_pre", 21, 12), GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 21, 13), GATE(0, "hclk_vio_h2p",
"hclk_vio_pre", 21, 14), GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vio_pre", 21, 15), /* CRU_CLKGATE_CON22 */ GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vio_pre", 22, 0), GATE(0, "hclk_vio_niu", "hclk_vio_pre", 22, 1), GATE(0, "aclk_vio_niu", "aclk_vio_pre", 22, 2), GATE(0, "aclk_rga_niu", "aclk_rga_pre", 22, 3), GATE(PCLK_HDMI, "pclk_hdmi", "hclk_vio_pre", 22, 4), GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio_pre", 22, 5), /* Bit 6-15 unused */ /* CRU_CLKGATE_CON23 */ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 23, 0), GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 23, 1), GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", 23, 2), GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", 23, 3), /* Bit 4-15 unused */ /* CRU_CLKGATE_CON24 */ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", 24, 0), GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", 24, 1), GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", 24, 2), GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", 24, 3), /* Bit 4-15 unused */ /* CRU_CLKGATE_CON25 */ GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", 25, 0), GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 25, 1), GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 25, 2), GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 25, 3), GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 25, 4), GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 25, 5), GATE(0, "aclk_axisram", "hclk_rkvenc", 25, 6), /* Bit 7-15 unused */ /* CRU_CLKGATE_CON26 */ GATE(ACLK_MAC2PHY, "aclk_gmac2phy", "aclk_gmac", 26, 0), GATE(PCLK_MAC2PHY, "pclk_gmac2phy", "pclk_gmac", 26, 1), GATE(ACLK_MAC2IO, "aclk_gmac2io", "aclk_gmac", 26, 2), GATE(PCLK_MAC2IO, "pclk_gmac2io", "pclk_gmac", 26, 3), GATE(0, "aclk_gmac_niu", "aclk_gmac", 26, 4), GATE(0, "pclk_gmac_niu", "pclk_gmac", 26, 5), /* Bit 6-15 unused */ /* CRU_CLKGATE_CON27 */ /* Bit 0 clk_ddrphy_en */ /* Bit 1 clk4x_ddrphy_en */ /* CRU_CLKGATE_CON28 */ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 28, 0), GATE(PCLK_USB3PHY_OTG, "pclk_usb3phy_otg", "pclk_phy", 28, 1), GATE(PCLK_USB3PHY_PIPE, "pclk_usb3phy_pipe", "pclk_phy", 28, 2), GATE(0,
"pclk_pmu", "pclk_bus", 28, 3), GATE(0, "pclk_otp", "pclk_bus", 28, 4) /* Bit 5-15 unused */ }; /* * PLLs */ #define PLL_RATE(_hz, _ref, _fb, _post1, _post2, _dspd, _frac) \ { \ .freq = _hz, \ .refdiv = _ref, \ .fbdiv = _fb, \ .postdiv1 = _post1, \ .postdiv2 = _post2, \ .dsmpd = _dspd, \ .frac = _frac, \ } static struct rk_clk_pll_rate rk3328_pll_rates[] = { /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), PLL_RATE(984000000, 1, 82, 2, 1, 1, 0), PLL_RATE(960000000, 1, 80, 2, 1, 1, 0), PLL_RATE(936000000, 1, 78, 2, 1, 1, 0), PLL_RATE(912000000, 1, 76, 2, 1, 1, 0), PLL_RATE(900000000, 4, 300, 2, 1, 1, 0), PLL_RATE(888000000, 1, 74, 2, 1, 1, 0), PLL_RATE(864000000, 1, 72, 2, 1, 1, 0), PLL_RATE(840000000, 1, 70, 2, 1, 1, 0), PLL_RATE(816000000, 1, 68, 2, 1, 1, 0), PLL_RATE(800000000, 6, 400, 2, 1, 1, 0), PLL_RATE(700000000, 6, 350, 2, 1, 1, 0), PLL_RATE(696000000, 1, 58, 2, 1, 1, 0), PLL_RATE(600000000, 1, 75, 3, 1, 1, 0), PLL_RATE(594000000, 2, 99, 2, 1, 1, 0), PLL_RATE(504000000, 1, 63, 3, 1, 1, 0), PLL_RATE(500000000, 6, 250, 2, 1, 1, 0), PLL_RATE(408000000, 1,
68, 2, 2, 1, 0), PLL_RATE(312000000, 1, 52, 2, 2, 1, 0), PLL_RATE(216000000, 1, 72, 4, 2, 1, 0), PLL_RATE(96000000, 1, 64, 4, 4, 1, 0), {}, }; static struct rk_clk_pll_rate rk3328_pll_frac_rates[] = { PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217), PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088), PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088), PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088), PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894), PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329), {}, }; /* Clock parents */ PLIST(pll_src_p) = {"xin24m"}; PLIST(xin24m_rtc32k_p) = {"xin24m", "clk_rtc32k"}; PLIST(pll_src_cpll_gpll_p) = {"cpll", "gpll"}; PLIST(pll_src_cpll_gpll_apll_p) = {"cpll", "gpll", "apll"}; PLIST(pll_src_cpll_gpll_xin24m_p) = {"cpll", "gpll", "xin24m", "xin24m" /* Dummy */}; PLIST(pll_src_cpll_gpll_usb480m_p) = {"cpll", "gpll", "usb480m"}; PLIST(pll_src_cpll_gpll_hdmiphy_p) = {"cpll", "gpll", "hdmi_phy"}; PLIST(pll_src_cpll_gpll_hdmiphy_usb480m_p) = {"cpll", "gpll", "hdmi_phy", "usb480m"}; PLIST(pll_src_apll_gpll_dpll_npll_p) = {"apll", "gpll", "dpll", "npll"}; PLIST(pll_src_cpll_gpll_xin24m_usb480m_p) = {"cpll", "gpll", "xin24m", "usb480m"}; PLIST(mux_ref_usb3otg_p) = { "xin24m", "clk_usb3_otg0_ref" }; PLIST(mux_mac2io_p) = { "clk_mac2io_src", "gmac_clkin" }; PLIST(mux_mac2io_ext_p) = { "clk_mac2io", "gmac_clkin" }; PLIST(mux_mac2phy_p) = { "clk_mac2phy_src", "phy_50m_out" }; PLIST(mux_i2s0_p) = { "clk_i2s0_div", "clk_i2s0_frac", "xin12m", "xin12m" }; PLIST(mux_i2s1_p) = { "clk_i2s1_div", "clk_i2s1_frac", "clkin_i2s1", "xin12m" }; PLIST(mux_i2s2_p) = { "clk_i2s2_div", "clk_i2s2_frac", "clkin_i2s2", "xin12m" }; PLIST(mux_dclk_lcdc_p) = {"hdmiphy", "vop_dclk_src"}; PLIST(mux_hdmiphy_p) = {"hdmi_phy", "xin24m"}; PLIST(mux_usb480m_p) = {"usb480m_phy", "xin24m"}; PLIST(mux_uart0_p) = {"clk_uart0_div", "clk_uart0_frac", "xin24m", "xin24m"}; PLIST(mux_uart1_p) = {"clk_uart1_div", "clk_uart1_frac", "xin24m", "xin24m"}; PLIST(mux_uart2_p) = {"clk_uart2_div",
"clk_uart2_frac", "xin24m", "xin24m"}; PLIST(mux_spdif_p) = {"clk_spdif_div", "clk_spdif_frac", "xin12m", "xin12m"}; PLIST(mux_cif_p) = {"clk_cif_pll", "xin24m"}; static struct rk_clk_pll_def apll = { .clkdef = { .id = PLL_APLL, .name = "apll", .parent_names = pll_src_p, .parent_cnt = nitems(pll_src_p), }, .base_offset = 0x00, .gate_offset = 0x200, .gate_shift = 0, .mode_reg = 0x80, .mode_shift = 1, .flags = RK_CLK_PLL_HAVE_GATE, .frac_rates = rk3328_pll_frac_rates, }; static struct rk_clk_pll_def dpll = { .clkdef = { .id = PLL_DPLL, .name = "dpll", .parent_names = pll_src_p, .parent_cnt = nitems(pll_src_p), }, .base_offset = 0x20, .gate_offset = 0x200, .gate_shift = 1, .mode_reg = 0x80, .mode_shift = 4, .flags = RK_CLK_PLL_HAVE_GATE, }; static struct rk_clk_pll_def cpll = { .clkdef = { .id = PLL_CPLL, .name = "cpll", .parent_names = pll_src_p, .parent_cnt = nitems(pll_src_p), }, .base_offset = 0x40, .mode_reg = 0x80, .mode_shift = 8, .rates = rk3328_pll_rates, }; static struct rk_clk_pll_def gpll = { .clkdef = { .id = PLL_GPLL, .name = "gpll", .parent_names = pll_src_p, .parent_cnt = nitems(pll_src_p), }, .base_offset = 0x60, .gate_offset = 0x200, .gate_shift = 2, .mode_reg = 0x80, .mode_shift = 12, .flags = RK_CLK_PLL_HAVE_GATE, .frac_rates = rk3328_pll_frac_rates, }; static struct rk_clk_pll_def npll = { .clkdef = { .id = PLL_NPLL, .name = "npll", .parent_names = pll_src_p, .parent_cnt = nitems(pll_src_p), }, .base_offset = 0xa0, .gate_offset = 0x200, .gate_shift = 12, .mode_reg = 0x80, .mode_shift = 1, .flags = RK_CLK_PLL_HAVE_GATE, .rates = rk3328_pll_rates, }; static struct rk_clk_armclk_rates rk3328_armclk_rates[] = { { .freq = 1296000000, .div = 1, }, { .freq = 1200000000, .div = 1, }, { .freq = 1104000000, .div = 1, }, { .freq = 1008000000, .div = 1, }, { .freq = 912000000, .div = 1, }, { .freq = 816000000, .div = 1, }, { .freq = 696000000, .div = 1, }, { .freq = 600000000, .div = 1, }, { .freq = 408000000, .div = 1, }, { .freq = 312000000, .div = 1, }, {
.freq = 216000000, .div = 1, }, { .freq = 96000000, .div = 1, }, }; static struct rk_clk_armclk_def armclk = { .clkdef = { .id = ARMCLK, .name = "armclk", .parent_names = pll_src_apll_gpll_dpll_npll_p, .parent_cnt = nitems(pll_src_apll_gpll_dpll_npll_p), }, .muxdiv_offset = 0x100, .mux_shift = 6, .mux_width = 2, .div_shift = 0, .div_width = 5, .flags = RK_CLK_COMPOSITE_HAVE_MUX, .main_parent = 3, /* npll */ .alt_parent = 0, /* apll */ .rates = rk3328_armclk_rates, .nrates = nitems(rk3328_armclk_rates), }; static struct rk_clk rk3328_clks[] = { /* External clocks */ LINK("xin24m"), LINK("gmac_clkin"), LINK("hdmi_phy"), LINK("usb480m_phy"), FRATE(0, "xin12m", 12000000), FRATE(0, "phy_50m_out", 50000000), FRATE(0, "clkin_i2s1", 0), FRATE(0, "clkin_i2s2", 0), /* PLLs */ { .type = RK3328_CLK_PLL, .clk.pll = &apll }, { .type = RK3328_CLK_PLL, .clk.pll = &dpll }, { .type = RK3328_CLK_PLL, .clk.pll = &cpll }, { .type = RK3328_CLK_PLL, .clk.pll = &gpll }, { .type = RK3328_CLK_PLL, .clk.pll = &npll }, { .type = RK_CLK_ARMCLK, .clk.armclk = &armclk, }, /* CRU_CRU_MISC */ MUXRAW(HDMIPHY, "hdmiphy", mux_hdmiphy_p, 0, 0x84, 13, 1), MUXRAW(USB480M, "usb480m", mux_usb480m_p, 0, 0x84, 15, 1), /* CRU_CLKSEL_CON0 */ /* COMP clk_core_div_con core_clk_pll_sel */ COMP(0, "aclk_bus_pre_c", pll_src_cpll_gpll_hdmiphy_p, 0, 0, 8, 5, 13, 2), /* CRU_CLKSEL_CON1 */ /* CDIV clk_core_dbg_div_con */ /* CDIV aclk_core_div_con */ CDIV(0, "hclk_bus_pre_c", "aclk_bus_pre", 0, 1, 8, 2), CDIV(0, "pclk_bus_pre_c", "aclk_bus_pre", 0, 1, 12, 2), /* CRU_CLKSEL_CON2 */ /* CDIV test_div_con */ /* CDIV func_24m_div_con */ /* CRU_CLKSEL_CON3 */ /* COMP ddr_div_cnt ddr_clk_pll_sel */ /* CRU_CLKSEL_CON4 */ COMP(0, "clk_otp_c", pll_src_cpll_gpll_xin24m_p, 0, 4, 0, 6, 6, 2), /* COMP pd_ddr_div_con ddrpdclk_clk_pll_sel */ /* CRU_CLKSEL_CON5 */ COMP(0, "clk_efuse_c", pll_src_cpll_gpll_xin24m_p, 0, 5, 8, 5, 14, 2), /* CRU_CLKSEL_CON6 */ MUX(0, "clk_i2s0_mux", mux_i2s0_p, RK_CLK_MUX_REPARENT, 6, 8, 2), COMP(0,
"clk_i2s0_div_c", pll_src_cpll_gpll_p, 0, 6, 0, 7, 15, 1), /* CRU_CLKSEL_CON7 */ FRACT(0, "clk_i2s0_frac_f", "clk_i2s0_div", 0, 7), /* CRU_CLKSEL_CON8 */ MUX(0, "clk_i2s1_mux", mux_i2s1_p, RK_CLK_MUX_REPARENT, 8, 8, 2), COMP(0, "clk_i2s1_div_c", pll_src_cpll_gpll_p, 0, 8, 0, 7, 15, 1), /* MUX i2s1_out_sel */ /* CRU_CLKSEL_CON9 */ FRACT(0, "clk_i2s1_frac_f", "clk_i2s1_div", 0, 9), /* CRU_CLKSEL_CON10 */ MUX(0, "clk_i2s2_mux", mux_i2s2_p, RK_CLK_MUX_REPARENT, 10, 8, 2), COMP(0, "clk_i2s2_div_c", pll_src_cpll_gpll_p, 0, 10, 0, 7, 15, 1), /* MUX i2s2_out_sel */ /* CRU_CLKSEL_CON11 */ FRACT(0, "clk_i2s2_frac_f", "clk_i2s2_div", 0, 11), /* CRU_CLKSEL_CON12 */ MUX(0, "clk_spdif_pll", pll_src_cpll_gpll_p, 0, 12, 15, 1), MUX(SCLK_SPDIF, "clk_spdif", mux_spdif_p, 0, 12, 8, 2), CDIV(0, "clk_spdif_div_c", "clk_spdif_pll", 0, 12, 0, 7), /* CRU_CLKSEL_CON13 */ FRACT(0, "clk_spdif_frac_f", "clk_spdif", 0, 13), /* CRU_CLKSEL_CON14 */ MUX(0, "clk_uart0_pll", pll_src_cpll_gpll_usb480m_p, 0, 14, 12, 2), MUX(SCLK_UART0, "clk_uart0", mux_uart0_p, 0, 14, 8, 2), CDIV(0, "clk_uart0_div_c", "clk_uart0_pll", 0, 14, 0, 7), /* CRU_CLKSEL_CON15 */ FRACT(0, "clk_uart0_frac_f", "clk_uart0_pll", 0, 15), /* CRU_CLKSEL_CON16 */ MUX(0, "clk_uart1_pll", pll_src_cpll_gpll_usb480m_p, 0, 16, 12, 2), MUX(SCLK_UART1, "clk_uart1", mux_uart1_p, 0, 16, 8, 2), CDIV(0, "clk_uart1_div_c", "clk_uart1_pll", 0, 16, 0, 7), /* CRU_CLKSEL_CON17 */ FRACT(0, "clk_uart1_frac_f", "clk_uart1_pll", 0, 17), /* CRU_CLKSEL_CON18 */ MUX(0, "clk_uart2_pll", pll_src_cpll_gpll_usb480m_p, 0, 18, 12, 2), MUX(SCLK_UART2, "clk_uart2", mux_uart2_p, 0, 18, 8, 2), CDIV(0, "clk_uart2_div_c", "clk_uart2_pll", 0, 18, 0, 7), /* CRU_CLKSEL_CON19 */ FRACT(0, "clk_uart2_frac_f", "clk_uart2_pll", 0, 19), /* CRU_CLKSEL_CON20 */ COMP(0, "clk_pdm_c", pll_src_cpll_gpll_apll_p, 0, 20, 8, 5, 14, 2), COMP(0, "clk_crypto_c", pll_src_cpll_gpll_p, 0, 20, 0, 5, 7, 1), /* CRU_CLKSEL_CON21 */ COMP(0, "clk_tsp_c", pll_src_cpll_gpll_p, 0, 21, 8, 5, 15, 1), /*
CRU_CLKSEL_CON22 */ CDIV(0, "clk_tsadc_c", "xin24m", 0, 22, 0, 10), /* CRU_CLKSEL_CON23 */ CDIV(0, "clk_saradc_c", "xin24m", 0, 23, 0, 10), /* CRU_CLKSEL_CON24 */ COMP(0, "clk_pwm_c", pll_src_cpll_gpll_p, 0, 24, 8, 7, 15, 1), COMP(0, "clk_spi_c", pll_src_cpll_gpll_p, 0, 24, 0, 7, 7, 1), /* CRU_CLKSEL_CON25 */ COMP(0, "aclk_gmac_c", pll_src_cpll_gpll_p, 0, 35, 0, 5, 6, 2),
/* NOTE(review): "aclk_gmac_c" uses register index 35 under the CON25 header (35 is also used by the i2c2/i2c3 entries below) — verify against the RK3328 TRM */
CDIV(0, "pclk_gmac_c", "pclk_gmac", 0, 25, 8, 3), /* CRU_CLKSEL_CON26 */ CDIV(0, "clk_mac2phy_out_c", "clk_mac2phy", 0, 26, 8, 2), COMP(0, "clk_mac2phy_src_c", pll_src_cpll_gpll_p, 0, 26, 0, 5, 7, 1), /* CRU_CLKSEL_CON27 */ COMP(0, "clk_mac2io_src_c", pll_src_cpll_gpll_p, 0, 27, 0, 5, 7, 1), COMP(0, "clk_mac2io_out_c", pll_src_cpll_gpll_p, 0, 27, 8, 5, 15, 1), /* CRU_CLKSEL_CON28 */ COMP(ACLK_PERI_PRE, "aclk_peri_pre", pll_src_cpll_gpll_hdmiphy_p, 0, 28, 0, 5, 6, 2), /* CRU_CLKSEL_CON29 */ CDIV(0, "pclk_peri_c", "aclk_peri_pre", 0, 29, 0, 2), CDIV(0, "hclk_peri_c", "aclk_peri_pre", 0, 29, 4, 3), /* CRU_CLKSEL_CON30 */ COMP(0, "clk_sdmmc_c", pll_src_cpll_gpll_xin24m_usb480m_p, 0, 30, 0, 8, 8, 2), /* CRU_CLKSEL_CON31 */ COMP(0, "clk_sdio_c", pll_src_cpll_gpll_xin24m_usb480m_p, 0, 31, 0, 8, 8, 2), /* CRU_CLKSEL_CON32 */ COMP(0, "clk_emmc_c", pll_src_cpll_gpll_xin24m_usb480m_p, 0, 32, 0, 8, 8, 2), /* CRU_CLKSEL_CON33 */ COMP(0, "clk_usb3otg_suspend_c", xin24m_rtc32k_p, 0, 33, 0, 10, 15, 1), /* CRU_CLKSEL_CON34 */ COMP(0, "clk_i2c0_c", pll_src_cpll_gpll_p, 0, 34, 0, 7, 7, 1), COMP(0, "clk_i2c1_c", pll_src_cpll_gpll_p, 0, 34, 8, 7, 15, 1), /* CRU_CLKSEL_CON35 */ COMP(0, "clk_i2c2_c", pll_src_cpll_gpll_p, 0, 35, 0, 7, 7, 1), COMP(0, "clk_i2c3_c", pll_src_cpll_gpll_p, 0, 35, 8, 7, 15, 1), /* CRU_CLKSEL_CON36 */ COMP(0, "aclk_rga_pre_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 36, 8, 5, 14, 2), COMP(0, "sclk_rga_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 36, 0, 5, 6, 2), /* CRU_CLKSEL_CON37 */ COMP(0, "aclk_vio_pre_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 37, 0, 5, 6, 2), CDIV(HCLK_VIO_PRE,
"hclk_vio_pre", "aclk_vio_pre", 0, 37, 8, 5), /* CRU_CLKSEL_CON38 */ COMP(0, "clk_rtc32k_c", pll_src_cpll_gpll_xin24m_p, 0, 38, 0, 14, 14, 2), /* CRU_CLKSEL_CON39 */ COMP(0, "aclk_vop_pre_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 39, 0, 5, 6, 2), /* CRU_CLKSEL_CON40 */ COMP(0, "vop_dclk_src_c", pll_src_cpll_gpll_p, 0, 40, 8, 8, 0, 1), CDIV(DCLK_HDMIPHY, "hdmiphy_div", "vop_dclk_src", 0, 40, 3, 3), /* MUX vop_dclk_frac_sel */ MUX(DCLK_LCDC, "vop_dclk", mux_dclk_lcdc_p, 0, 40, 1, 1), /* CRU_CLKSEL_CON41 */ /* FRACT dclk_vop_frac_div_con */ /* CRU_CLKSEL_CON42 */ MUX(0, "clk_cif_pll", pll_src_cpll_gpll_p, 0, 42, 7, 1), COMP(0, "clk_cif_src_c", mux_cif_p, 0, 42, 0, 5, 5, 1), /* CRU_CLKSEL_CON43 */ COMP(0, "clk_sdmmc_ext_c", pll_src_cpll_gpll_xin24m_usb480m_p, 0, 43, 0, 8, 8, 2), /* CRU_CLKSEL_CON44 */ COMP(0, "aclk_gpu_pre_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 44, 0, 5, 6, 2), /* CRU_CLKSEL_CON45 */ MUX(SCLK_REF_USB3OTG, "clk_ref_usb3otg", mux_ref_usb3otg_p, 0, 45, 8, 1), COMP(0, "clk_ref_usb3otg_src_c", pll_src_cpll_gpll_p, 0, 45, 0, 7, 7, 1), /* CRU_CLKSEL_CON46 */ /* Unused */ /* CRU_CLKSEL_CON47 */ /* Unused */ /* CRU_CLKSEL_CON48 */ COMP(0, "sclk_cabac_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 48, 8, 5, 14, 2), COMP(0, "aclk_rkvdec_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 48, 0, 5, 6, 2), /* CRU_CLKSEL_CON49 */ COMP(0, "sclk_vdec_core_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 49, 0, 5, 6, 2), /* CRU_CLKSEL_CON50 */ COMP(0, "aclk_vpu_pre_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 50, 0, 5, 6, 2), /* CRU_CLKSEL_CON51 */ COMP(0, "sclk_venc_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 51, 8, 5, 14, 2), COMP(0, "aclk_rkvenc_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 51, 0, 5, 6, 2), /* CRU_CLKSEL_CON52 */ COMP(0, "sclk_venc_dsp_c", pll_src_cpll_gpll_hdmiphy_usb480m_p, 0, 51, 8, 5, 14, 2), COMP(0, "sclk_wifi_c", pll_src_cpll_gpll_usb480m_p, 0, 51, 0, 6, 6, 2),
/* NOTE(review): both CON52 entries use register index 51, colliding with the CON51 entries just above — 52 looks intended; verify against the RK3328 TRM */
/* GRF_SOC_CON4 */ MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, 0,
RK3328_GRF_SOC_CON4, 14, 1),
	/* GRF_MAC_CON1 */
	MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_p, 0,
	    RK3328_GRF_MAC_CON1, 10, 1),
	/* GRF_MAC_CON2 */
	MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_p, 0,
	    RK3328_GRF_MAC_CON2, 10, 1),

	/*
	 * This clock is controlled in the secure world
	 */
	FFACT(PCLK_WDT, "pclk_wdt", "pclk_bus", 1, 1),
};

/*
 * Probe: match the FDT node with compatible "rockchip,rk3328-cru".
 */
static int
rk3328_cru_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "rockchip,rk3328-cru")) {
		device_set_desc(dev, "Rockchip RK3328 Clock and Reset Unit");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Attach: hand the RK3328 gate/clock tables and the softreset register
 * layout to the shared Rockchip CRU code (rk_cru_attach()), which does
 * the actual resource allocation and clock registration.
 */
static int
rk3328_cru_attach(device_t dev)
{
	struct rk_cru_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->gates = rk3328_gates;
	sc->ngates = nitems(rk3328_gates);

	sc->clks = rk3328_clks;
	sc->nclks = nitems(rk3328_clks);

	/* Softreset registers start at offset 0x300; 184 reset lines. */
	sc->reset_offset = 0x300;
	sc->reset_num = 184;

	return (rk_cru_attach(dev));
}

static device_method_t rk3328_cru_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rk3328_cru_probe),
	DEVMETHOD(device_attach,	rk3328_cru_attach),

	DEVMETHOD_END
};

/* Inherit the generic CRU methods from the common rk_cru driver class. */
DEFINE_CLASS_1(rk3328_cru, rk3328_cru_driver, rk3328_cru_methods,
    sizeof(struct rk_cru_softc), rk_cru_driver);

/* Clocks must be available early, before regular device attach. */
EARLY_DRIVER_MODULE(rk3328_cru, simplebus, rk3328_cru_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/dev/clk/rockchip/rk3399_cru.c b/sys/dev/clk/rockchip/rk3399_cru.c
index 45d6592ceb73..c4b2dc0910d8 100644
--- a/sys/dev/clk/rockchip/rk3399_cru.c
+++ b/sys/dev/clk/rockchip/rk3399_cru.c
@@ -1,1270 +1,1270 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Emmanuel Vadot
 * Copyright (c) 2018 Val Packett
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
 Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* NOTE(review): the <header> names after each #include were lost in extraction. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include
-#include
-#include
+#include
+#include
+#include
#include
#include

/* CLKSEL registers start at 0x100, CLKGATE registers at 0x300, 4 bytes each. */
#define	CRU_CLKSEL_CON(x)	(0x100 + (x) * 0x4)
#define	CRU_CLKGATE_CON(x)	(0x300 + (x) * 0x4)

/*
 * GATES
 * Each entry is GATE(id, name, parent_name, gate_register_index, bit).
 */
static struct rk_cru_gate rk3399_gates[] = {
	/* CRU_CLKGATE_CON0 */
	/* 15-8 unused */
	GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", 0, 7),
	GATE(0, "pclk_dbg_core_l", "pclk_dbg_core_l_c", 0, 6),
	GATE(0, "atclk_core_l", "atclk_core_l_c", 0, 5),
	GATE(0, "aclkm_core_l", "aclkm_core_l_c", 0, 4),
	GATE(0, "clk_core_l_gpll_src", "gpll", 0, 3),
	GATE(0, "clk_core_l_dpll_src", "dpll", 0, 2),
	GATE(0, "clk_core_l_bpll_src", "bpll", 0, 1),
	GATE(0, "clk_core_l_lpll_src", "lpll", 0, 0),
	/* CRU_CLKGATE_CON1 */
	/* 15 - 8 unused */
	GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", 1, 7),
	GATE(0, "pclk_dbg_core_b", "pclk_dbg_core_b_c", 1, 6),
	GATE(0, "atclk_core_b", "atclk_core_b_c", 1, 5),
	GATE(0, "aclkm_core_b", "aclkm_core_b_c", 1, 4),
	GATE(0, "clk_core_b_gpll_src", "gpll", 1, 3),
GATE(0, "clk_core_b_dpll_src", "dpll", 1, 2),
	GATE(0, "clk_core_b_bpll_src", "bpll", 1, 1),
	GATE(0, "clk_core_b_lpll_src", "lpll", 1, 0),
	/* CRU_CLKGATE_CON2 */
	/* 15 - 11 unused */
	GATE(0, "npll_cs", "npll", 2, 10),
	GATE(0, "gpll_cs", "gpll", 2, 9),
	GATE(0, "cpll_cs", "cpll", 2, 8),
	GATE(SCLK_CCI_TRACE, "clk_cci_trace", "clk_cci_trace_c", 2, 7),
	GATE(0, "gpll_cci_trace", "gpll", 2, 6),
	GATE(0, "cpll_cci_trace", "cpll", 2, 5),
	GATE(0, "aclk_cci_pre", "aclk_cci_pre_c", 2, 4),
	GATE(0, "vpll_aclk_cci_src", "vpll", 2, 3),
	GATE(0, "npll_aclk_cci_src", "npll", 2, 2),
	GATE(0, "gpll_aclk_cci_src", "gpll", 2, 1),
	GATE(0, "cpll_aclk_cci_src", "cpll", 2, 0),
	/* CRU_CLKGATE_CON3 */
	/* 15 - 8 unused */
	GATE(0, "aclk_center", "aclk_center_c", 3, 7),
	/* 6 unused */
	/* 5 unused */
	GATE(PCLK_DDR, "pclk_ddr", "pclk_ddr_c", 3, 4),
	GATE(0, "clk_ddrc_gpll_src", "gpll", 3, 3),
	GATE(0, "clk_ddrc_dpll_src", "dpll", 3, 2),
	GATE(0, "clk_ddrc_bpll_src", "bpll", 3, 1),
	GATE(0, "clk_ddrc_lpll_src", "lpll", 3, 0),
	/* CRU_CLKGATE_CON4 */
	/* 15 - 12 unused */
	GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", 4, 11),
	GATE(0, "clk_rga_core", "clk_rga_core_c", 4, 10),
	GATE(0, "hclk_rga_pre", "hclk_rga_pre_c", 4, 9),
	GATE(0, "aclk_rga_pre", "aclk_rga_pre_c", 4, 8),
	GATE(0, "hclk_iep_pre", "hclk_iep_pre_c", 4, 7),
	GATE(0, "aclk_iep_pre", "aclk_iep_pre_c", 4, 6),
	GATE(SCLK_VDU_CA, "clk_vdu_ca", "clk_vdu_ca_c", 4, 5),
	GATE(SCLK_VDU_CORE, "clk_vdu_core", "clk_vdu_core_c", 4, 4),
	GATE(0, "hclk_vdu_pre", "hclk_vdu_pre_c", 4, 3),
	GATE(0, "aclk_vdu_pre", "aclk_vdu_pre_c", 4, 2),
	GATE(0, "hclk_vcodec_pre", "hclk_vcodec_pre_c", 4, 1),
	GATE(0, "aclk_vcodec_pre", "aclk_vcodec_pre_c", 4, 0),
	/* CRU_CLKGATE_CON5 */
	/* 15 - 10 unused */
	GATE(SCLK_MAC_TX, "clk_rmii_tx", "clk_rmii_src", 5, 9),
	GATE(SCLK_MAC_RX, "clk_rmii_rx", "clk_rmii_src", 5, 8),
	GATE(SCLK_MACREF, "clk_mac_ref", "clk_rmii_src", 5, 7),
	GATE(SCLK_MACREF_OUT, "clk_mac_refout", "clk_rmii_src", 5, 6),
	GATE(SCLK_MAC, "clk_gmac", "clk_gmac_c", 5, 5),
GATE(PCLK_PERIHP, "pclk_perihp", "pclk_perihp_c", 5, 4),
	GATE(HCLK_PERIHP, "hclk_perihp", "hclk_perihp_c", 5, 3),
	GATE(ACLK_PERIHP, "aclk_perihp", "aclk_perihp_c", 5, 2),
	GATE(0, "cpll_aclk_perihp_src", "cpll", 5, 1),
	GATE(0, "gpll_aclk_perihp_src", "gpll", 5, 0),
	/* CRU_CLKGATE_CON6 */
	/* 15 unused */
	GATE(SCLK_EMMC, "clk_emmc", "clk_emmc_c", 6, 14),
	GATE(0, "cpll_aclk_emmc_src", "cpll", 6, 13),
	GATE(0, "gpll_aclk_emmc_src", "gpll", 6, 12),
	GATE(0, "pclk_gmac_pre", "pclk_gmac_pre_c", 6, 11),
	GATE(0, "aclk_gmac_pre", "aclk_gmac_pre_c", 6, 10),
	GATE(0, "cpll_aclk_gmac_src", "cpll", 6, 9),
	GATE(0, "gpll_aclk_gmac_src", "gpll", 6, 8),
	/* 7 unused */
	GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", 6, 6),
	GATE(SCLK_USB2PHY0_REF, "clk_usb2phy0_ref", "xin24m", 6, 5),
	GATE(SCLK_HSICPHY, "clk_hsicphy", "clk_hsicphy_c", 6, 4),
	GATE(0, "clk_pcie_core_cru", "clk_pcie_core_cru_c", 6, 3),
	GATE(SCLK_PCIE_PM, "clk_pcie_pm", "clk_pcie_pm_c", 6, 2),
	GATE(SCLK_SDMMC, "clk_sdmmc", "clk_sdmmc_c", 6, 1),
	GATE(SCLK_SDIO, "clk_sdio", "clk_sdio_c", 6, 0),
	/* CRU_CLKGATE_CON7 */
	/* 15 - 10 unused */
	GATE(FCLK_CM0S, "fclk_cm0s", "fclk_cm0s_c", 7, 9),
	GATE(SCLK_CRYPTO1, "clk_crypto1", "clk_crypto1_c", 7, 8),
	GATE(SCLK_CRYPTO0, "clk_crypto0", "clk_crypto0_c", 7, 7),
	GATE(0, "cpll_fclk_cm0s_src", "cpll", 7, 6),
	GATE(0, "gpll_fclk_cm0s_src", "gpll", 7, 5),
	GATE(PCLK_PERILP0, "pclk_perilp0", "pclk_perilp0_c", 7, 4),
	GATE(HCLK_PERILP0, "hclk_perilp0", "hclk_perilp0_c", 7, 3),
	GATE(ACLK_PERILP0, "aclk_perilp0", "aclk_perilp0_c", 7, 2),
	GATE(0, "cpll_aclk_perilp0_src", "cpll", 7, 1),
	GATE(0, "gpll_aclk_perilp0_src", "gpll", 7, 0),
	/* CRU_CLKGATE_CON8 */
	GATE(SCLK_SPDIF_8CH, "clk_spdif", "clk_spdif_mux", 8, 15),
	GATE(0, "clk_spdif_frac", "clk_spdif_frac_c", 8, 14),
	GATE(0, "clk_spdif_div", "clk_spdif_div_c", 8, 13),
	GATE(SCLK_I2S_8CH_OUT, "clk_i2sout", "clk_i2sout_c", 8, 12),
	GATE(SCLK_I2S2_8CH, "clk_i2s2", "clk_i2s2_mux", 8, 11),
	GATE(0, "clk_i2s2_frac", "clk_i2s2_frac_c", 8, 10),
	GATE(0,
"clk_i2s2_div", "clk_i2s2_div_c", 8, 9),
	GATE(SCLK_I2S1_8CH, "clk_i2s1", "clk_i2s1_mux", 8, 8),
	GATE(0, "clk_i2s1_frac", "clk_i2s1_frac_c", 8, 7),
	GATE(0, "clk_i2s1_div", "clk_i2s1_div_c", 8, 6),
	GATE(SCLK_I2S0_8CH, "clk_i2s0", "clk_i2s0_mux", 8, 5),
	GATE(0, "clk_i2s0_frac", "clk_i2s0_frac_c", 8, 4),
	GATE(0, "clk_i2s0_div", "clk_i2s0_div_c", 8, 3),
	GATE(PCLK_PERILP1, "pclk_perilp1", "pclk_perilp1_c", 8, 2),
	/* NOTE(review): id HCLK_PERILP1 on a *_src gate looks odd - confirm. */
	GATE(HCLK_PERILP1, "cpll_hclk_perilp1_src", "cpll", 8, 1),
	GATE(0, "gpll_hclk_perilp1_src", "gpll", 8, 0),
	/* CRU_CLKGATE_CON9 */
	GATE(SCLK_SPI4, "clk_spi4", "clk_spi4_c", 9, 15),
	GATE(SCLK_SPI2, "clk_spi2", "clk_spi2_c", 9, 14),
	GATE(SCLK_SPI1, "clk_spi1", "clk_spi1_c", 9, 13),
	GATE(SCLK_SPI0, "clk_spi0", "clk_spi0_c", 9, 12),
	GATE(SCLK_SARADC, "clk_saradc", "clk_saradc_c", 9, 11),
	GATE(SCLK_TSADC, "clk_tsadc", "clk_tsadc_c", 9, 10),
	/* 9 - 8 unused */
	GATE(0, "clk_uart3_frac", "clk_uart3_frac_c", 9, 7),
	GATE(0, "clk_uart3_div", "clk_uart3_div_c", 9, 6),
	GATE(0, "clk_uart2_frac", "clk_uart2_frac_c", 9, 5),
	GATE(0, "clk_uart2_div", "clk_uart2_div_c", 9, 4),
	GATE(0, "clk_uart1_frac", "clk_uart1_frac_c", 9, 3),
	GATE(0, "clk_uart1_div", "clk_uart1_div_c", 9, 2),
	GATE(0, "clk_uart0_frac", "clk_uart0_frac_c", 9, 1),
	GATE(0, "clk_uart0_div", "clk_uart0_div_c", 9, 0),
	/* CRU_CLKGATE_CON10 */
	GATE(SCLK_VOP1_PWM, "clk_vop1_pwm", "clk_vop1_pwm_c", 10, 15),
	GATE(SCLK_VOP0_PWM, "clk_vop0_pwm", "clk_vop0_pwm_c", 10, 14),
	GATE(DCLK_VOP0_DIV, "dclk_vop0_div", "dclk_vop0_div_c", 10, 12),
	GATE(DCLK_VOP1_DIV, "dclk_vop1_div", "dclk_vop1_div_c", 10, 13),
	GATE(0, "hclk_vop1_pre", "hclk_vop1_pre_c", 10, 11),
	GATE(ACLK_VOP1_PRE, "aclk_vop1_pre", "aclk_vop1_pre_c", 10, 10),
	GATE(0, "hclk_vop0_pre", "hclk_vop0_pre_c", 10, 9),
	GATE(ACLK_VOP0_PRE, "aclk_vop0_pre", "aclk_vop0_pre_c", 10, 8),
	GATE(0, "clk_cifout_src", "clk_cifout_src_c", 10, 7),
	GATE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", "clk_spdif_rec_dptx_c", 10, 6),
	GATE(SCLK_I2C7, "clk_i2c7", "clk_i2c7_c", 10, 5),
	GATE(SCLK_I2C3,
"clk_i2c3", "clk_i2c3_c", 10, 4),
	GATE(SCLK_I2C6, "clk_i2c6", "clk_i2c6_c", 10, 3),
	GATE(SCLK_I2C2, "clk_i2c2", "clk_i2c2_c", 10, 2),
	GATE(SCLK_I2C5, "clk_i2c5", "clk_i2c5_c", 10, 1),
	GATE(SCLK_I2C1, "clk_i2c1", "clk_i2c1_c", 10, 0),
	/* CRU_CLKGATE_CON11 */
	GATE(SCLK_MIPIDPHY_CFG, "clk_mipidphy_cfg", "xin24m", 11, 15),
	GATE(SCLK_MIPIDPHY_REF, "clk_mipidphy_ref", "xin24m", 11, 14),
	/* 13-12 unused */
	GATE(PCLK_EDP, "pclk_edp", "pclk_edp_c", 11, 11),
	GATE(PCLK_HDCP, "pclk_hdcp", "pclk_hdcp_c", 11, 10),
	/* 9 unused */
	GATE(SCLK_DP_CORE, "clk_dp_core", "clk_dp_core_c", 11, 8),
	GATE(SCLK_HDMI_CEC, "clk_hdmi_cec", "clk_hdmi_cec_c", 11, 7),
	GATE(SCLK_HDMI_SFR, "clk_hdmi_sfr", "xin24m", 11, 6),
	GATE(SCLK_ISP1, "clk_isp1", "clk_isp1_c", 11, 5),
	GATE(SCLK_ISP0, "clk_isp0", "clk_isp0_c", 11, 4),
	GATE(HCLK_HDCP, "hclk_hdcp", "hclk_hdcp_c", 11, 3),
	GATE(ACLK_HDCP, "aclk_hdcp", "aclk_hdcp_c", 11, 2),
	GATE(PCLK_VIO, "pclk_vio", "pclk_vio_c", 11, 1),
	GATE(ACLK_VIO, "aclk_vio", "aclk_vio_c", 11, 0),
	/* CRU_CLKGATE_CON12 */
	/* 15 - 14 unused */
	GATE(HCLK_SD, "hclk_sd", "hclk_sd_c", 12, 13),
	GATE(ACLK_GIC_PRE, "aclk_gic_pre", "aclk_gic_pre_c", 12, 12),
	GATE(HCLK_ISP1, "hclk_isp1", "hclk_isp1_c", 12, 11),
	GATE(ACLK_ISP1, "aclk_isp1", "aclk_isp1_c", 12, 10),
	GATE(HCLK_ISP0, "hclk_isp0", "hclk_isp0_c", 12, 9),
	GATE(ACLK_ISP0, "aclk_isp0", "aclk_isp0_c", 12, 8),
	/* 7 unused */
	GATE(SCLK_PCIEPHY_REF100M, "clk_pciephy_ref100m", "clk_pciephy_ref100m_c", 12, 6),
	/* 5 unused */
	GATE(SCLK_USB3OTG1_SUSPEND, "clk_usb3otg1_suspend", "clk_usb3otg1_suspend_c", 12, 4),
	GATE(SCLK_USB3OTG0_SUSPEND, "clk_usb3otg0_suspend", "clk_usb3otg0_suspend_c", 12, 3),
	GATE(SCLK_USB3OTG1_REF, "clk_usb3otg1_ref", "xin24m", 12, 2),
	GATE(SCLK_USB3OTG0_REF, "clk_usb3otg0_ref", "xin24m", 12, 1),
	GATE(ACLK_USB3, "aclk_usb3", "aclk_usb3_c", 12, 0),
	/* CRU_CLKGATE_CON13 */
	GATE(SCLK_TESTCLKOUT2, "clk_testout2", "clk_testout2_c", 13, 15),
	GATE(SCLK_TESTCLKOUT1, "clk_testout1", "clk_testout1_c", 13, 14),
	GATE(SCLK_SPI5,
"clk_spi5", "clk_spi5_c", 13, 13),
	/* NOTE(review): both usbphy *_480m_src gates use bit 12 here - confirm. */
	GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", 13, 12),
	GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", 13, 12),
	GATE(0, "clk_test", "clk_test_c", 13, 11),
	/* 10 unused */
	GATE(0, "clk_test_frac", "clk_test_frac_c", 13, 9),
	/* 8 unused */
	GATE(SCLK_UPHY1_TCPDCORE, "clk_uphy1_tcpdcore", "clk_uphy1_tcpdcore_c", 13, 7),
	GATE(SCLK_UPHY1_TCPDPHY_REF, "clk_uphy1_tcpdphy_ref", "clk_uphy1_tcpdphy_ref_c", 13, 6),
	GATE(SCLK_UPHY0_TCPDCORE, "clk_uphy0_tcpdcore", "clk_uphy0_tcpdcore_c", 13, 5),
	GATE(SCLK_UPHY0_TCPDPHY_REF, "clk_uphy0_tcpdphy_ref", "clk_uphy0_tcpdphy_ref_c", 13, 4),
	/* 3 - 2 unused */
	GATE(SCLK_PVTM_GPU, "aclk_pvtm_gpu", "xin24m", 13, 1),
	GATE(0, "aclk_gpu_pre", "aclk_gpu_pre_c", 13, 0),
	/* CRU_CLKGATE_CON14 */
	/* 15 - 14 unused */
	GATE(ACLK_PERF_CORE_L, "aclk_perf_core_l", "aclkm_core_l", 14, 13),
	GATE(ACLK_CORE_ADB400_CORE_L_2_CCI500, "aclk_core_adb400_core_l_2_cci500", "aclkm_core_l", 14, 12),
	GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_core_adb400_core_l_2_gic", "armclkl", 14, 11),
	GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_core_adb400_gic_2_core_l", "armclkl", 14, 10),
	GATE(0, "clk_dbg_pd_core_l", "armclkl", 14, 9),
	/* 8 - 7 unused */
	GATE(ACLK_PERF_CORE_B, "aclk_perf_core_b", "aclkm_core_b", 14, 6),
	GATE(ACLK_CORE_ADB400_CORE_B_2_CCI500, "aclk_core_adb400_core_b_2_cci500", "aclkm_core_b", 14, 5),
	GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", 14, 4),
	GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_core_adb400_gic_2_core_b", "armclkb", 14, 3),
	GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", 14, 2),
	GATE(0, "clk_dbg_pd_core_b", "armclkb", 14, 1),
	/* 0 unused */
	/* CRU_CLKGATE_CON15 */
	/* 15 - 8 unused */
	GATE(ACLK_CCI_GRF, "aclk_cci_grf", "aclk_cci_pre", 15, 7),
	GATE(0, "clk_dbg_noc", "clk_cs", 15, 6),
	GATE(0, "clk_dbg_cxcs", "clk_cs", 15, 5),
	GATE(ACLK_CCI_NOC1, "aclk_cci_noc1", "aclk_cci_pre", 15, 4),
	GATE(ACLK_CCI_NOC0, "aclk_cci_noc0", "aclk_cci_pre", 15, 3),
	GATE(ACLK_CCI,
"aclk_cci", "aclk_cci_pre", 15, 2),
	GATE(ACLK_ADB400M_PD_CORE_B, "aclk_adb400m_pd_core_b", "aclk_cci_pre", 15, 1),
	GATE(ACLK_ADB400M_PD_CORE_L, "aclk_adb400m_pd_core_l", "aclk_cci_pre", 15, 0),
	/* CRU_CLKGATE_CON16 */
	/* 15 - 12 unused */
	GATE(HCLK_RGA_NOC, "hclk_rga_noc", "hclk_rga_pre", 16, 11),
	GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 16, 10),
	GATE(ACLK_RGA_NOC, "aclk_rga_noc", "aclk_rga_pre", 16, 9),
	GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 16, 8),
	/* 7 - 4 unused */
	GATE(HCLK_IEP_NOC, "hclk_iep_noc", "hclk_iep_pre", 16, 3),
	GATE(HCLK_IEP, "hclk_iep", "hclk_iep_pre", 16, 2),
	GATE(ACLK_IEP_NOC, "aclk_iep_noc", "aclk_iep_pre", 16, 1),
	GATE(ACLK_IEP, "aclk_iep", "aclk_iep_pre", 16, 0),
	/* CRU_CLKGATE_CON17 */
	/* 15 - 12 unused */
	GATE(HCLK_VDU_NOC, "hclk_vdu_noc", "hclk_vdu_pre", 17, 11),
	GATE(HCLK_VDU, "hclk_vdu", "hclk_vdu_pre", 17, 10),
	GATE(ACLK_VDU_NOC, "aclk_vdu_noc", "aclk_vdu_pre", 17, 9),
	GATE(ACLK_VDU, "aclk_vdu", "aclk_vdu_pre", 17, 8),
	GATE(0, "hclk_vcodec_noc", "hclk_vcodec_pre", 17, 3),
	GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 17, 2),
	GATE(0, "aclk_vcodec_noc", "aclk_vcodec_pre", 17, 1),
	GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 17, 0),
	/* CRU_CLKGATE_CON18 */
	GATE(PCLK_CIC, "pclk_cic", "pclk_ddr", 18, 15),
	GATE(0, "clk_ddr_mon_timer", "xin24m", 18, 14),
	GATE(0, "clk_ddr_mon", "clk_ddrc_div2", 18, 13),
	GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", 18, 12),
	GATE(0, "clk_ddr_cic", "clk_ddrc_div2", 18, 11),
	GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", 18, 10),
	GATE(0, "clk_ddrcfg_msch1", "clk_ddrc_div2", 18, 9),
	GATE(0, "clk_ddrphy1", "clk_ddrc_div2", 18, 8),
	GATE(0, "clk_ddrphy_ctrl1", "clk_ddrc_div2", 18, 7),
	GATE(0, "clk_ddrc1", "clk_ddrc_div2", 18, 6),
	GATE(0, "clk_ddr1_msch", "clk_ddrc_div2", 18, 5),
	GATE(0, "clk_ddrcfg_msch0", "clk_ddrc_div2", 18, 4),
	GATE(0, "clk_ddrphy0", "clk_ddrc_div2", 18, 3),
	GATE(0, "clk_ddrphy_ctrl0", "clk_ddrc_div2", 18, 2),
	GATE(0, "clk_ddrc0", "clk_ddrc_div2", 18, 1),
	/*
CRU_CLKGATE_CON19 */
	/* 15 - 3 unused */
	GATE(PCLK_DDR_SGRF, "pclk_ddr_sgrf", "pclk_ddr", 19, 2),
	GATE(ACLK_CENTER_PERI_NOC, "aclk_center_peri_noc", "aclk_center", 19, 1),
	GATE(ACLK_CENTER_MAIN_NOC, "aclk_center_main_noc", "aclk_center", 19, 0),
	/* CRU_CLKGATE_CON20 */
	GATE(0, "hclk_ahb1tom", "hclk_perihp", 20, 15),
	GATE(0, "pclk_perihp_noc", "pclk_perihp", 20, 14),
	GATE(0, "hclk_perihp_noc", "hclk_perihp", 20, 13),
	GATE(0, "aclk_perihp_noc", "aclk_perihp", 20, 12),
	GATE(PCLK_PCIE, "pclk_pcie", "pclk_perihp", 20, 11),
	GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", 20, 10),
	GATE(HCLK_HSIC, "hclk_hsic", "hclk_perihp", 20, 9),
	GATE(HCLK_HOST1_ARB, "hclk_host1_arb", "hclk_perihp", 20, 8),
	GATE(HCLK_HOST1, "hclk_host1", "hclk_perihp", 20, 7),
	GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_perihp", 20, 6),
	GATE(HCLK_HOST0, "hclk_host0", "hclk_perihp", 20, 5),
	GATE(PCLK_PERIHP_GRF, "pclk_perihp_grf", "pclk_perihp", 20, 4),
	GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", 20, 2),
	/* 1 - 0 unused */
	/* CRU_CLKGATE_CON21 */
	/* 15 - 10 unused */
	GATE(PCLK_UPHY1_TCPD_G, "pclk_uphy1_tcpd_g", "pclk_alive", 21, 9),
	GATE(PCLK_UPHY1_TCPHY_G, "pclk_uphy1_tcphy_g", "pclk_alive", 21, 8),
	/* 7 unused */
	GATE(PCLK_UPHY0_TCPD_G, "pclk_uphy0_tcpd_g", "pclk_alive", 21, 6),
	GATE(PCLK_UPHY0_TCPHY_G, "pclk_uphy0_tcphy_g", "pclk_alive", 21, 5),
	GATE(PCLK_USBPHY_MUX_G, "pclk_usbphy_mux_g", "pclk_alive", 21, 4),
	GATE(SCLK_DPHY_RX0_CFG, "clk_dphy_rx0_cfg", "clk_mipidphy_cfg", 21, 3),
	GATE(SCLK_DPHY_TX1RX1_CFG, "clk_dphy_tx1rx1_cfg", "clk_mipidphy_cfg", 21, 2),
	GATE(SCLK_DPHY_TX0_CFG, "clk_dphy_tx0_cfg", "clk_mipidphy_cfg", 21, 1),
	GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", 21, 0),
	/* CRU_CLKGATE_CON22 */
	GATE(PCLK_EFUSE1024S, "pclk_efuse1024s", "pclk_perilp1", 22, 15),
	GATE(PCLK_EFUSE1024NS, "pclk_efuse1024ns", "pclk_perilp1", 22, 14),
	GATE(PCLK_TSADC, "pclk_tsadc", "pclk_perilp1", 22, 13),
	GATE(PCLK_SARADC, "pclk_saradc", "pclk_perilp1", 22, 12),
	GATE(PCLK_MAILBOX0, "pclk_mailbox0",
"pclk_perilp1", 22, 11),
	GATE(PCLK_I2C3, "pclk_i2c3", "pclk_perilp1", 22, 10),
	GATE(PCLK_I2C2, "pclk_i2c2", "pclk_perilp1", 22, 9),
	GATE(PCLK_I2C6, "pclk_i2c6", "pclk_perilp1", 22, 8),
	GATE(PCLK_I2C5, "pclk_i2c5", "pclk_perilp1", 22, 7),
	GATE(PCLK_I2C1, "pclk_i2c1", "pclk_perilp1", 22, 6),
	GATE(PCLK_I2C7, "pclk_i2c7", "pclk_perilp1", 22, 5),
	GATE(PCLK_UART3, "pclk_uart3", "pclk_perilp1", 22, 3),
	GATE(PCLK_UART2, "pclk_uart2", "pclk_perilp1", 22, 2),
	GATE(PCLK_UART1, "pclk_uart1", "pclk_perilp1", 22, 1),
	GATE(PCLK_UART0, "pclk_uart0", "pclk_perilp1", 22, 0),
	/* CRU_CLKGATE_CON23 */
	/* 15 - 14 unused */
	GATE(PCLK_SPI4, "pclk_spi4", "pclk_perilp1", 23, 13),
	GATE(PCLK_SPI2, "pclk_spi2", "pclk_perilp1", 23, 12),
	GATE(PCLK_SPI1, "pclk_spi1", "pclk_perilp1", 23, 11),
	GATE(PCLK_SPI0, "pclk_spi0", "pclk_perilp1", 23, 10),
	GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", 23, 9),
	GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", 23, 8),
	GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", 23, 7),
	GATE(SCLK_INTMEM4, "clk_intmem4", "aclk_perilp0", 23, 6),
	GATE(SCLK_INTMEM3, "clk_intmem3", "aclk_perilp0", 23, 5),
	GATE(SCLK_INTMEM2, "clk_intmem2", "aclk_perilp0", 23, 4),
	GATE(SCLK_INTMEM1, "clk_intmem1", "aclk_perilp0", 23, 3),
	GATE(SCLK_INTMEM0, "clk_intmem0", "aclk_perilp0", 23, 2),
	GATE(ACLK_TZMA, "aclk_tzma", "aclk_perilp0", 23, 1),
	GATE(ACLK_INTMEM, "aclk_intmem", "aclk_perilp0", 23, 0),
	/* CRU_CLKGATE_CON24 */
	GATE(HCLK_S_CRYPTO1, "hclk_s_crypto1", "hclk_perilp0", 24, 15),
	GATE(HCLK_M_CRYPTO1, "hclk_m_crypto1", "hclk_perilp0", 24, 14),
	/* NOTE(review): id PCLK_PERIHP_GRF is reused here (also at 20, 4) - confirm. */
	GATE(PCLK_PERIHP_GRF, "pclk_perilp_sgrf", "pclk_perilp1", 24, 13),
	GATE(SCLK_M0_PERILP_DEC, "clk_m0_perilp_dec", "fclk_cm0s", 24, 11),
	GATE(DCLK_M0_PERILP, "dclk_m0_perilp", "fclk_cm0s", 24, 10),
	GATE(HCLK_M0_PERILP, "hclk_m0_perilp", "fclk_cm0s", 24, 9),
	GATE(SCLK_M0_PERILP, "sclk_m0_perilp", "fclk_cm0s", 24, 8),
	/* 7 - unused */
	GATE(HCLK_S_CRYPTO0, "hclk_s_crypto0", "hclk_perilp0", 24, 6),
	GATE(HCLK_M_CRYPTO0, "hclk_m_crypto0", "hclk_perilp0", 24, 5),
GATE(HCLK_ROM, "hclk_rom", "hclk_perilp0", 24, 4),
	/* 3 - 0 unused */
	/* CRU_CLKGATE_CON25 */
	/* 15 - 13 unused */
	GATE(0, "hclk_sdio_noc", "hclk_perilp1", 25, 12),
	GATE(HCLK_M0_PERILP_NOC, "hclk_m0_perilp_noc", "fclk_cm0s", 25, 11),
	GATE(0, "pclk_perilp1_noc", "pclk_perilp1", 25, 10),
	GATE(0, "hclk_perilp1_noc", "hclk_perilp1", 25, 9),
	GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", 25, 8),
	GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", 25, 7),
	GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 25, 6),
	GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 25, 5),
	/* 4 - 0 unused */
	/* CRU_CLKGATE_CON26 */
	/* 15 - 12 unused */
	GATE(SCLK_TIMER11, "clk_timer11", "xin24m", 26, 11),
	GATE(SCLK_TIMER10, "clk_timer10", "xin24m", 26, 10),
	GATE(SCLK_TIMER09, "clk_timer09", "xin24m", 26, 9),
	GATE(SCLK_TIMER08, "clk_timer08", "xin24m", 26, 8),
	GATE(SCLK_TIMER07, "clk_timer07", "xin24m", 26, 7),
	GATE(SCLK_TIMER06, "clk_timer06", "xin24m", 26, 6),
	GATE(SCLK_TIMER05, "clk_timer05", "xin24m", 26, 5),
	GATE(SCLK_TIMER04, "clk_timer04", "xin24m", 26, 4),
	GATE(SCLK_TIMER03, "clk_timer03", "xin24m", 26, 3),
	GATE(SCLK_TIMER02, "clk_timer02", "xin24m", 26, 2),
	GATE(SCLK_TIMER01, "clk_timer01", "xin24m", 26, 1),
	GATE(SCLK_TIMER00, "clk_timer00", "xin24m", 26, 0),
	/* CRU_CLKGATE_CON27 */
	/* 15 - 9 unused */
	GATE(ACLK_ISP1_WRAPPER, "aclk_isp1_wrapper", "hclk_isp1", 27, 8),
	GATE(HCLK_ISP1_WRAPPER, "hclk_isp1_wrapper", "aclk_isp0", 27, 7),
	GATE(PCLK_ISP1_WRAPPER, "pclkin_isp1_wrapper", "pclkin_cif", 27, 6),
	GATE(ACLK_ISP0_WRAPPER, "aclk_isp0_wrapper", "aclk_isp0", 27, 5),
	GATE(HCLK_ISP0_WRAPPER, "hclk_isp0_wrapper", "hclk_isp0", 27, 4),
	GATE(ACLK_ISP1_NOC, "aclk_isp1_noc", "aclk_isp1", 27, 3),
	GATE(HCLK_ISP1_NOC, "hclk_isp1_noc", "hclk_isp1", 27, 2),
	GATE(ACLK_ISP0_NOC, "aclk_isp0_noc", "aclk_isp0", 27, 1),
	GATE(HCLK_ISP0_NOC, "hclk_isp0_noc", "hclk_isp0", 27, 0),
	/* CRU_CLKGATE_CON28 */
	/* 15 - 8 unused */
	GATE(ACLK_VOP1, "aclk_vop1", "aclk_vop1_pre",
28, 7),
	GATE(HCLK_VOP1, "hclk_vop1", "hclk_vop1_pre", 28, 6),
	GATE(ACLK_VOP1_NOC, "aclk_vop1_noc", "aclk_vop1_pre", 28, 5),
	GATE(HCLK_VOP1_NOC, "hclk_vop1_noc", "hclk_vop1_pre", 28, 4),
	GATE(ACLK_VOP0, "aclk_vop0", "aclk_vop0_pre", 28, 3),
	GATE(HCLK_VOP0, "hclk_vop0", "hclk_vop0_pre", 28, 2),
	GATE(ACLK_VOP0_NOC, "aclk_vop0_noc", "aclk_vop0_pre", 28, 1),
	GATE(HCLK_VOP0_NOC, "hclk_vop0_noc", "hclk_vop0_pre", 28, 0),
	/* CRU_CLKGATE_CON29 */
	/* 15 - 13 unused */
	GATE(PCLK_VIO_GRF, "pclk_vio_grf", "pclk_vio", 29, 12),
	GATE(PCLK_GASKET, "pclk_gasket", "pclk_hdcp", 29, 11),
	GATE(ACLK_HDCP22, "aclk_hdcp22", "aclk_hdcp", 29, 10),
	GATE(HCLK_HDCP22, "hclk_hdcp22", "hclk_hdcp", 29, 9),
	GATE(PCLK_HDCP22, "pclk_hdcp22", "pclk_hdcp", 29, 8),
	GATE(PCLK_DP_CTRL, "pclk_dp_ctrl", "pclk_hdcp", 29, 7),
	GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_hdcp", 29, 6),
	GATE(HCLK_HDCP_NOC, "hclk_hdcp_noc", "hclk_hdcp", 29, 5),
	GATE(ACLK_HDCP_NOC, "aclk_hdcp_noc", "aclk_hdcp", 29, 4),
	GATE(PCLK_HDCP_NOC, "pclk_hdcp_noc", "pclk_hdcp", 29, 3),
	GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "pclk_vio", 29, 2),
	GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "pclk_vio", 29, 1),
	GATE(ACLK_VIO_NOC, "aclk_vio_noc", "aclk_vio", 29, 0),
	/* CRU_CLKGATE_CON30 */
	/* 15 - 12 unused */
	GATE(ACLK_GPU_GRF, "aclk_gpu_grf", "aclk_gpu_pre", 30, 11),
	GATE(ACLK_PERF_GPU, "aclk_perf_gpu", "aclk_gpu_pre", 30, 10),
	/* 9 unused */
	GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 30, 8),
	/* 7 - 5 unused */
	GATE(ACLK_USB3_GRF, "aclk_usb3_grf", "aclk_usb3", 30, 4),
	GATE(ACLK_USB3_RKSOC_AXI_PERF, "aclk_usb3_rksoc_axi_perf", "aclk_usb3", 30, 3),
	GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_usb3", 30, 2),
	GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb3", 30, 1),
	GATE(ACLK_USB3_NOC, "aclk_usb3_noc", "aclk_usb3", 30, 0),
	/* CRU_CLKGATE_CON31 */
	/* 15 - 11 unused */
	GATE(PCLK_SGRF, "pclk_sgrf", "pclk_alive", 31, 10),
	GATE(PCLK_PMU_INTR_ARB, "pclk_pmu_intr_arb", "pclk_alive", 31, 9),
	GATE(PCLK_HSICPHY, "pclk_hsicphy", "pclk_perihp", 31, 8),
GATE(PCLK_TIMER1, "pclk_timer1", "pclk_alive", 31, 7),
	GATE(PCLK_TIMER0, "pclk_timer0", "pclk_alive", 31, 6),
	GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_alive", 31, 5),
	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_alive", 31, 4),
	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_alive", 31, 3),
	GATE(PCLK_INTR_ARB, "pclk_intr_arb", "pclk_alive", 31, 2),
	GATE(PCLK_GRF, "pclk_grf", "pclk_alive", 31, 1),
	/* 0 unused */
	/* CRU_CLKGATE_CON32 */
	/* 15 - 14 unused */
	GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "pclk_edp", 32, 13),
	GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", 32, 12),
	/* 11 unused */
	GATE(ACLK_EMMC_GRF, "aclk_emmcgrf", "aclk_emmc", 32, 10),
	GATE(ACLK_EMMC_NOC, "aclk_emmc_noc", "aclk_emmc", 32, 9),
	GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", 32, 8),
	/* 7 - 5 unused */
	GATE(ACLK_PERF_GMAC, "aclk_perf_gmac", "aclk_gmac_pre", 32, 4),
	GATE(PCLK_GMAC_NOC, "pclk_gmac_noc", "pclk_gmac_pre", 32, 3),
	GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 32, 2),
	GATE(ACLK_GMAC_NOC, "aclk_gmac_noc", "aclk_gmac_pre", 32, 1),
	GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 32, 0),
	/* CRU_CLKGATE_CON33 */
	/* 15 - 10 unused */
	GATE(0, "hclk_sdmmc_noc", "hclk_sd", 33, 9),
	GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 33, 8),
	GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_gic_adb400_gic_2_core_b", "aclk_gic_pre", 33, 5),
	GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_gic_adb400_gic_2_core_l", "aclk_gic_pre", 33, 4),
	GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_gic_adb400_core_b_2_gic", "aclk_gic_pre", 33, 3),
	GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_gic_adb400_core_l_2_gic", "aclk_gic_pre", 33, 2),
	GATE(ACLK_GIC_NOC, "aclk_gic_noc", "aclk_gic_pre", 33, 1),
	GATE(ACLK_GIC, "aclk_gic", "aclk_gic_pre", 33, 0),
	/* CRU_CLKGATE_CON34 */
	/* 15 - 7 unused */
	GATE(0, "hclk_sdioaudio_noc", "hclk_perilp1", 34, 6),
	GATE(PCLK_SPI5, "pclk_spi5", "hclk_perilp1", 34, 5),
	GATE(HCLK_SDIO, "hclk_sdio", "hclk_perilp1", 34, 4),
	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_perilp1", 34, 3),
	GATE(HCLK_I2S2_8CH, "hclk_i2s2", "hclk_perilp1", 34, 2),
GATE(HCLK_I2S1_8CH, "hclk_i2s1", "hclk_perilp1", 34, 1),
	GATE(HCLK_I2S0_8CH, "hclk_i2s0", "hclk_perilp1", 34, 0),
};

/*
 * Build one rk_clk_pll_rate entry: output frequency plus the divider
 * settings (refdiv/fbdiv/postdiv1/postdiv2/dsmpd) that produce it.
 */
#define PLL_RATE(_hz, _ref, _fb, _post1, _post2, _dspd)	\
	{						\
		.freq = _hz,				\
		.refdiv = _ref,				\
		.fbdiv = _fb,				\
		.postdiv1 = _post1,			\
		.postdiv2 = _post2,			\
		.dsmpd = _dspd,				\
	}

static struct rk_clk_pll_rate rk3399_pll_rates[] = {
	/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
	PLL_RATE(2208000000, 1, 92, 1, 1, 1),
	PLL_RATE(2184000000, 1, 91, 1, 1, 1),
	PLL_RATE(2160000000, 1, 90, 1, 1, 1),
	PLL_RATE(2136000000, 1, 89, 1, 1, 1),
	PLL_RATE(2112000000, 1, 88, 1, 1, 1),
	PLL_RATE(2088000000, 1, 87, 1, 1, 1),
	PLL_RATE(2064000000, 1, 86, 1, 1, 1),
	PLL_RATE(2040000000, 1, 85, 1, 1, 1),
	PLL_RATE(2016000000, 1, 84, 1, 1, 1),
	PLL_RATE(1992000000, 1, 83, 1, 1, 1),
	PLL_RATE(1968000000, 1, 82, 1, 1, 1),
	PLL_RATE(1944000000, 1, 81, 1, 1, 1),
	PLL_RATE(1920000000, 1, 80, 1, 1, 1),
	PLL_RATE(1896000000, 1, 79, 1, 1, 1),
	PLL_RATE(1872000000, 1, 78, 1, 1, 1),
	PLL_RATE(1848000000, 1, 77, 1, 1, 1),
	PLL_RATE(1824000000, 1, 76, 1, 1, 1),
	PLL_RATE(1800000000, 1, 75, 1, 1, 1),
	PLL_RATE(1776000000, 1, 74, 1, 1, 1),
	PLL_RATE(1752000000, 1, 73, 1, 1, 1),
	PLL_RATE(1728000000, 1, 72, 1, 1, 1),
	PLL_RATE(1704000000, 1, 71, 1, 1, 1),
	PLL_RATE(1680000000, 1, 70, 1, 1, 1),
	PLL_RATE(1656000000, 1, 69, 1, 1, 1),
	PLL_RATE(1632000000, 1, 68, 1, 1, 1),
	PLL_RATE(1608000000, 1, 67, 1, 1, 1),
	PLL_RATE(1600000000, 3, 200, 1, 1, 1),
	PLL_RATE(1584000000, 1, 66, 1, 1, 1),
	PLL_RATE(1560000000, 1, 65, 1, 1, 1),
	PLL_RATE(1536000000, 1, 64, 1, 1, 1),
	PLL_RATE(1512000000, 1, 63, 1, 1, 1),
	PLL_RATE(1488000000, 1, 62, 1, 1, 1),
	PLL_RATE(1464000000, 1, 61, 1, 1, 1),
	PLL_RATE(1440000000, 1, 60, 1, 1, 1),
	PLL_RATE(1416000000, 1, 59, 1, 1, 1),
	PLL_RATE(1392000000, 1, 58, 1, 1, 1),
	PLL_RATE(1368000000, 1, 57, 1, 1, 1),
	PLL_RATE(1344000000, 1, 56, 1, 1, 1),
	PLL_RATE(1320000000, 1, 55, 1, 1, 1),
	PLL_RATE(1296000000, 1, 54, 1, 1, 1),
	PLL_RATE(1272000000, 1, 53, 1, 1, 1),
	PLL_RATE(1248000000, 1, 52,
1, 1),
	PLL_RATE(1200000000, 1, 50, 1, 1, 1),
	PLL_RATE(1188000000, 2, 99, 1, 1, 1),
	PLL_RATE(1104000000, 1, 46, 1, 1, 1),
	PLL_RATE(1100000000, 12, 550, 1, 1, 1),
	PLL_RATE(1008000000, 1, 84, 2, 1, 1),
	PLL_RATE(1000000000, 1, 125, 3, 1, 1),
	PLL_RATE( 984000000, 1, 82, 2, 1, 1),
	PLL_RATE( 960000000, 1, 80, 2, 1, 1),
	PLL_RATE( 936000000, 1, 78, 2, 1, 1),
	PLL_RATE( 912000000, 1, 76, 2, 1, 1),
	PLL_RATE( 900000000, 4, 300, 2, 1, 1),
	PLL_RATE( 888000000, 1, 74, 2, 1, 1),
	PLL_RATE( 864000000, 1, 72, 2, 1, 1),
	PLL_RATE( 840000000, 1, 70, 2, 1, 1),
	PLL_RATE( 816000000, 1, 68, 2, 1, 1),
	PLL_RATE( 800000000, 1, 100, 3, 1, 1),
	PLL_RATE( 700000000, 6, 350, 2, 1, 1),
	PLL_RATE( 696000000, 1, 58, 2, 1, 1),
	PLL_RATE( 676000000, 3, 169, 2, 1, 1),
	PLL_RATE( 600000000, 1, 75, 3, 1, 1),
	PLL_RATE( 594000000, 1, 99, 4, 1, 1),
	PLL_RATE( 533250000, 8, 711, 4, 1, 1),
	PLL_RATE( 504000000, 1, 63, 3, 1, 1),
	PLL_RATE( 500000000, 6, 250, 2, 1, 1),
	PLL_RATE( 408000000, 1, 68, 2, 2, 1),
	PLL_RATE( 312000000, 1, 52, 2, 2, 1),
	PLL_RATE( 297000000, 1, 99, 4, 2, 1),
	PLL_RATE( 216000000, 1, 72, 4, 2, 1),
	PLL_RATE( 148500000, 1, 99, 4, 4, 1),
	PLL_RATE( 106500000, 1, 71, 4, 4, 1),
	PLL_RATE(  96000000, 1, 64, 4, 4, 1),
	PLL_RATE(  74250000, 2, 99, 4, 4, 1),
	PLL_RATE(  65000000, 1, 65, 6, 4, 1),
	PLL_RATE(  54000000, 1, 54, 6, 4, 1),
	PLL_RATE(  27000000, 1, 27, 6, 4, 1),
	/* Zero terminator for the rate-table walker. */
	{},
};

/* Rates for the core_l cluster; second field presumably a divider - confirm. */
static struct rk_clk_armclk_rates rk3399_cpu_l_rates[] = {
	{1800000000, 1},
	{1704000000, 1},
	{1608000000, 1},
	{1512000000, 1},
	{1488000000, 1},
	{1416000000, 1},
	{1200000000, 1},
	{1008000000, 1},
	{ 816000000, 1},
	{ 696000000, 1},
	{ 600000000, 1},
	{ 408000000, 1},
	{ 312000000, 1},
	{ 216000000, 1},
	{  96000000, 1},
};

/* Rates for the core_b cluster; second field presumably a divider - confirm. */
static struct rk_clk_armclk_rates rk3399_cpu_b_rates[] = {
	{2208000000, 1},
	{2184000000, 1},
	{2088000000, 1},
	{2040000000, 1},
	{2016000000, 1},
	{1992000000, 1},
	{1896000000, 1},
	{1800000000, 1},
	{1704000000, 1},
	{1608000000, 1},
	{1512000000, 1},
	{1488000000, 1},
	{1416000000, 1},
	{1200000000, 1},
	{1008000000, 1},
	{ 816000000, 1},
	{
696000000, 1}, { 600000000, 1}, { 408000000, 1}, { 312000000, 1}, { 216000000, 1}, { 96000000, 1}, }; /* Standard PLL. */ #define PLL(_id, _name, _base) \ { \ .type = RK3399_CLK_PLL, \ .clk.pll = &(struct rk_clk_pll_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = pll_src_p, \ .clkdef.parent_cnt = nitems(pll_src_p), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_offset = _base, \ .rates = rk3399_pll_rates, \ }, \ } PLIST(pll_src_p) = {"xin24m", "xin32k"}; PLIST(armclkl_p) = {"clk_core_l_lpll_src", "clk_core_l_bpll_src", "clk_core_l_dpll_src", "clk_core_l_gpll_src"}; PLIST(armclkb_p) = {"clk_core_b_lpll_src", "clk_core_b_bpll_src", "clk_core_b_dpll_src", "clk_core_b_gpll_src"}; PLIST(ddrclk_p) = {"clk_ddrc_lpll_src", "clk_ddrc_bpll_src", "clk_ddrc_dpll_src", "clk_ddrc_gpll_src"}; PLIST(pll_src_cpll_gpll_p) = {"cpll", "gpll"}; PLIST(pll_src_cpll_gpll_ppll_p) = {"cpll", "gpll", "ppll"}; PLIST(pll_src_cpll_gpll_upll_p) = {"cpll", "gpll", "upll"}; PLIST(pll_src_npll_cpll_gpll_p) = {"npll", "cpll", "gpll"}; PLIST(pll_src_cpll_gpll_npll_npll_p) = {"cpll", "gpll", "npll", "npll"}; PLIST(pll_src_cpll_gpll_npll_ppll_p) = {"cpll", "gpll", "npll", "ppll" }; PLIST(pll_src_cpll_gpll_npll_24m_p) = {"cpll", "gpll", "npll", "xin24m" }; PLIST(pll_src_cpll_gpll_npll_usbphy480m_p)= {"cpll", "gpll", "npll", "clk_usbphy_480m" }; PLIST(pll_src_ppll_cpll_gpll_npll_upll_p) = { "ppll", "cpll", "gpll", "npll", "upll" }; PLIST(pll_src_cpll_gpll_npll_upll_24m_p)= { "cpll", "gpll", "npll", "upll", "xin24m" }; PLIST(pll_src_cpll_gpll_npll_ppll_upll_24m_p) = { "cpll", "gpll", "npll", "ppll", "upll", "xin24m" }; PLIST(pll_src_vpll_cpll_gpll_gpll_p) = {"vpll", "cpll", "gpll", "gpll"}; PLIST(pll_src_vpll_cpll_gpll_npll_p) = {"vpll", "cpll", "gpll", "npll"}; PLIST(aclk_cci_p) = {"cpll_aclk_cci_src", "gpll_aclk_cci_src", "npll_aclk_cci_src", "vpll_aclk_cci_src"}; PLIST(cci_trace_p) = {"cpll_cci_trace","gpll_cci_trace"}; PLIST(cs_p)= {"cpll_cs", "gpll_cs", 
"npll_cs","npll_cs"}; PLIST(aclk_perihp_p)= {"cpll_aclk_perihp_src", "gpll_aclk_perihp_src" }; PLIST(dclk_vop0_p) = {"dclk_vop0_div", "dclk_vop0_frac"}; PLIST(dclk_vop1_p)= {"dclk_vop1_div", "dclk_vop1_frac"}; PLIST(clk_cif_p) = {"clk_cifout_src", "xin24m"}; PLIST(pll_src_24m_usbphy480m_p) = { "xin24m", "clk_usbphy_480m"}; PLIST(pll_src_24m_pciephy_p) = { "xin24m", "clk_pciephy_ref100m"}; PLIST(pll_src_24m_32k_cpll_gpll_p)= {"xin24m", "xin32k", "cpll", "gpll"}; PLIST(pciecore_cru_phy_p) = {"clk_pcie_core_cru", "clk_pcie_core_phy"}; PLIST(aclk_emmc_p) = { "cpll_aclk_emmc_src", "gpll_aclk_emmc_src"}; PLIST(aclk_perilp0_p) = { "cpll_aclk_perilp0_src", "gpll_aclk_perilp0_src" }; PLIST(fclk_cm0s_p) = { "cpll_fclk_cm0s_src", "gpll_fclk_cm0s_src" }; PLIST(hclk_perilp1_p) = { "cpll_hclk_perilp1_src", "gpll_hclk_perilp1_src" }; PLIST(clk_testout1_p) = { "clk_testout1_pll_src", "xin24m" }; PLIST(clk_testout2_p) = { "clk_testout2_pll_src", "xin24m" }; PLIST(usbphy_480m_p) = { "clk_usbphy0_480m_src", "clk_usbphy1_480m_src" }; PLIST(aclk_gmac_p) = { "cpll_aclk_gmac_src", "gpll_aclk_gmac_src" }; PLIST(rmii_p) = { "clk_gmac", "clkin_gmac" }; PLIST(spdif_p) = { "clk_spdif_div", "clk_spdif_frac", "clkin_i2s", "xin12m" }; PLIST(i2s0_p) = { "clk_i2s0_div", "clk_i2s0_frac", "clkin_i2s", "xin12m" }; PLIST(i2s1_p) = { "clk_i2s1_div", "clk_i2s1_frac", "clkin_i2s", "xin12m" }; PLIST(i2s2_p) = { "clk_i2s2_div", "clk_i2s2_frac", "clkin_i2s", "xin12m" }; PLIST(i2sch_p) = {"clk_i2s0", "clk_i2s1", "clk_i2s2"}; PLIST(i2sout_p) = {"clk_i2sout_src", "xin12m"}; PLIST(uart0_p)= {"clk_uart0_div", "clk_uart0_frac", "xin24m"}; PLIST(uart1_p)= {"clk_uart1_div", "clk_uart1_frac", "xin24m"}; PLIST(uart2_p)= {"clk_uart2_div", "clk_uart2_frac", "xin24m"}; PLIST(uart3_p)= {"clk_uart3_div", "clk_uart3_frac", "xin24m"}; static struct rk_clk rk3399_clks[] = { /* External clocks */ LINK("xin24m"), LINK("xin32k"), FFACT(0, "xin12m", "xin24m", 1, 2), FRATE(0, "clkin_i2s", 0), FRATE(0, "pclkin_cif", 0), 
LINK("clk_usbphy0_480m"),
	LINK("clk_usbphy1_480m"),
	LINK("clkin_gmac"),
	FRATE(0, "clk_pcie_core_phy", 0),
	FFACT(0, "clk_ddrc_div2", "clk_ddrc", 1, 2),

	/* PLLs (base_offset selects the per-PLL register bank) */
	PLL(PLL_APLLL, "lpll", 0x00),
	PLL(PLL_APLLB, "bpll", 0x20),
	PLL(PLL_DPLL, "dpll", 0x40),
	PLL(PLL_CPLL, "cpll", 0x60),
	PLL(PLL_GPLL, "gpll", 0x80),
	PLL(PLL_NPLL, "npll", 0xA0),
	PLL(PLL_VPLL, "vpll", 0xC0),

	/* CRU_CLKSEL_CON0 */
	CDIV(0, "aclkm_core_l_c", "armclkl", 0, 0, 8, 5),
	ARMDIV(ARMCLKL, "armclkl", armclkl_p, rk3399_cpu_l_rates,
	    0, 0, 5, 6, 2, 0, 3),
	/* CRU_CLKSEL_CON1 */
	CDIV(0, "pclk_dbg_core_l_c", "armclkl", 0, 1, 8, 5),
	CDIV(0, "atclk_core_l_c", "armclkl", 0, 1, 0, 5),
	/* CRU_CLKSEL_CON2 */
	CDIV(0, "aclkm_core_b_c", "armclkb", 0, 2, 8, 5),
	ARMDIV(ARMCLKB, "armclkb", armclkb_p, rk3399_cpu_b_rates,
	    2, 0, 5, 6, 2, 1, 3),
	/* CRU_CLKSEL_CON3 */
	CDIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", 0, 3, 13, 2),
	CDIV(0, "pclk_dbg_core_b_c", "armclkb", 0, 3, 8, 5),
	CDIV(0, "atclk_core_b_c", "armclkb", 0, 3, 0, 5),
	/* CRU_CLKSEL_CON4 */
	COMP(0, "clk_cs", cs_p, 0, 4, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON5 */
	COMP(0, "clk_cci_trace_c", cci_trace_p, 0, 5, 8, 5, 15, 1),
	COMP(0, "aclk_cci_pre_c", aclk_cci_p, 0, 5, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON6 */
	COMP(0, "pclk_ddr_c", pll_src_cpll_gpll_p, 0, 6, 8, 5, 15, 1),
	COMP(SCLK_DDRC, "clk_ddrc", ddrclk_p, 0, 6, 0, 3, 4, 2),
	/* CRU_CLKSEL_CON7 */
	CDIV(0, "hclk_vcodec_pre_c", "aclk_vcodec_pre", 0, 7, 8, 5),
	COMP(0, "aclk_vcodec_pre_c", pll_src_cpll_gpll_npll_ppll_p,
	    0, 7, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON8 */
	CDIV(0, "hclk_vdu_pre_c", "aclk_vdu_pre", 0, 8, 8, 5),
	COMP(0, "aclk_vdu_pre_c", pll_src_cpll_gpll_npll_ppll_p,
	    0, 8, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON9 */
	COMP(0, "clk_vdu_ca_c", pll_src_cpll_gpll_npll_npll_p,
	    0, 9, 8, 5, 14, 2),
	COMP(0, "clk_vdu_core_c", pll_src_cpll_gpll_npll_npll_p,
	    0, 9, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON10 */
	CDIV(0, "hclk_iep_pre_c", "aclk_iep_pre", 0, 10, 8, 5),
	COMP(0, "aclk_iep_pre_c", pll_src_cpll_gpll_npll_ppll_p,
	    0, 10, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON11 */
CDIV(0, "hclk_rga_pre_c", "aclk_rga_pre", 0, 11, 8, 5), COMP(0, "aclk_rga_pre_c", pll_src_cpll_gpll_npll_ppll_p, 0, 11, 0, 5, 6, 2), /* CRU_CLKSEL_CON12 */ COMP(0, "aclk_center_c", pll_src_cpll_gpll_npll_npll_p, 0, 12, 8, 5, 14, 2), COMP(SCLK_RGA_CORE, "clk_rga_core_c", pll_src_cpll_gpll_npll_ppll_p, 0, 12, 0, 5, 6, 2), /* CRU_CLKSEL_CON13 */ COMP(0, "hclk_sd_c", pll_src_cpll_gpll_p, 0, 13, 8, 5, 15, 1), COMP(0, "aclk_gpu_pre_c", pll_src_ppll_cpll_gpll_npll_upll_p, 0, 13, 0, 5, 5, 3), /* CRU_CLKSEL_CON14 */ MUX(0, "upll", pll_src_24m_usbphy480m_p, 0, 14, 15, 1), CDIV(0, "pclk_perihp_c", "aclk_perihp", 0, 14, 12, 2), CDIV(0, "hclk_perihp_c", "aclk_perihp", 0, 14, 8, 2), MUX(0, "clk_usbphy_480m", usbphy_480m_p, 0, 14, 6, 1), COMP(0, "aclk_perihp_c", aclk_perihp_p, 0, 14, 0, 5, 7, 1), /* CRU_CLKSEL_CON15 */ COMP(0, "clk_sdio_c", pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0, 15, 0, 7, 8, 3), /* CRU_CLKSEL_CON16 */ COMP(0, "clk_sdmmc_c", pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0, 16, 0, 7, 8, 3), /* CRU_CLKSEL_CON17 */ COMP(0, "clk_pcie_pm_c", pll_src_cpll_gpll_npll_24m_p, 0, 17, 0, 7, 8, 3), /* CRU_CLKSEL_CON18 */ CDIV(0, "clk_pciephy_ref100m_c", "npll", 0, 18, 11, 5), MUX(SCLK_PCIEPHY_REF, "clk_pciephy_ref", pll_src_24m_pciephy_p, 0, 18, 10, 1), MUX(SCLK_PCIE_CORE, "clk_pcie_core", pciecore_cru_phy_p, 0, 18, 7, 1), COMP(0, "clk_pcie_core_cru_c", pll_src_cpll_gpll_npll_npll_p, 0, 18, 0, 7, 8, 2), /* CRU_CLKSEL_CON19 */ CDIV(0, "pclk_gmac_pre_c", "aclk_gmac_pre", 0, 19, 8, 3), MUX(SCLK_RMII_SRC, "clk_rmii_src",rmii_p, 0, 19, 4, 1), MUX(SCLK_HSICPHY, "clk_hsicphy_c", pll_src_cpll_gpll_npll_usbphy480m_p, 0, 19, 0, 2), /* CRU_CLKSEL_CON20 */ COMP(0, "clk_gmac_c", pll_src_cpll_gpll_npll_npll_p, 0, 20, 8, 5, 14, 2), COMP(0, "aclk_gmac_pre_c", aclk_gmac_p, 0, 20, 0, 5, 7, 1), /* CRU_CLKSEL_CON21 */ COMP(ACLK_EMMC, "aclk_emmc", aclk_emmc_p, 0, 21, 0, 5, 7, 1), /* CRU_CLKSEL_CON22 */ COMP(0, "clk_emmc_c", pll_src_cpll_gpll_npll_upll_24m_p, 0, 22, 0, 7, 8, 3), /* CRU_CLKSEL_CON23 
*/ CDIV(0, "pclk_perilp0_c", "aclk_perilp0", 0, 23, 12, 3), CDIV(0, "hclk_perilp0_c", "aclk_perilp0", 0, 23, 8, 2), COMP(0, "aclk_perilp0_c", aclk_perilp0_p, 0, 23, 0, 5, 7, 1), /* CRU_CLKSEL_CON24 */ COMP(0, "fclk_cm0s_c", fclk_cm0s_p, 0, 24, 8, 5, 15, 1), COMP(0, "clk_crypto0_c", pll_src_cpll_gpll_ppll_p, 0, 24, 0, 5, 6, 2), /* CRU_CLKSEL_CON25 */ CDIV(0, "pclk_perilp1_c", "hclk_perilp1", 0, 25, 8, 3), COMP(HCLK_PERILP1, "hclk_perilp1", hclk_perilp1_p, 0, 25, 0, 5, 7, 1), /* CRU_CLKSEL_CON26 */ CDIV(0, "clk_saradc_c", "xin24m", 0, 26, 8, 8), COMP(0, "clk_crypto1_c", pll_src_cpll_gpll_ppll_p, 0, 26, 0, 5, 6, 2), /* CRU_CLKSEL_CON27 */ COMP(0, "clk_tsadc_c", pll_src_p, 0, 27, 0, 10, 15, 1), /* CRU_CLKSEL_CON28 */ MUX(0, "clk_i2s0_mux", i2s0_p, RK_CLK_MUX_REPARENT, 28, 8, 2), COMP(0, "clk_i2s0_div_c", pll_src_cpll_gpll_p, 0, 28, 0, 7, 7, 1), /* CRU_CLKSEL_CON29 */ MUX(0, "clk_i2s1_mux", i2s1_p, RK_CLK_MUX_REPARENT, 29, 8, 2), COMP(0, "clk_i2s1_div_c", pll_src_cpll_gpll_p, 0, 29, 0, 7, 7, 1), /* CRU_CLKSEL_CON30 */ MUX(0, "clk_i2s2_mux", i2s2_p, RK_CLK_MUX_REPARENT, 30, 8, 2), COMP(0, "clk_i2s2_div_c", pll_src_cpll_gpll_p, 0, 30, 0, 7, 7, 1), /* CRU_CLKSEL_CON31 */ MUX(0, "clk_i2sout_c", i2sout_p, 0, 31, 2, 1), MUX(0, "clk_i2sout_src", i2sch_p, 0, 31, 0, 2), /* CRU_CLKSEL_CON32 */ COMP(0, "clk_spdif_rec_dptx_c", pll_src_cpll_gpll_p, 0, 32, 8, 5, 15, 1), MUX(0, "clk_spdif_mux", spdif_p, 0, 32, 13, 2), COMP(0, "clk_spdif_div_c", pll_src_cpll_gpll_p, 0, 32, 0, 7, 7, 1), /* CRU_CLKSEL_CON33 */ MUX(0, "clk_uart_src", pll_src_cpll_gpll_p, 0, 33, 15, 1), MUX(0, "clk_uart0_src", pll_src_cpll_gpll_upll_p, 0, 33, 12, 2), MUX(SCLK_UART0, "clk_uart0", uart0_p, 0, 33, 8, 2), CDIV(0, "clk_uart0_div_c", "clk_uart0_src", 0, 33, 0, 7), /* CRU_CLKSEL_CON34 */ MUX(SCLK_UART1, "clk_uart1", uart1_p, 0, 34, 8, 2), CDIV(0, "clk_uart1_div_c", "clk_uart_src", 0, 34, 0, 7), /* CRU_CLKSEL_CON35 */ MUX(SCLK_UART2, "clk_uart2", uart2_p, 0, 35, 8, 2), CDIV(0, "clk_uart2_div_c", "clk_uart_src", 0, 
35, 0, 7), /* CRU_CLKSEL_CON36 */ MUX(SCLK_UART3, "clk_uart3", uart3_p, 0, 36, 8, 2), CDIV(0, "clk_uart3_div_c", "clk_uart_src", 0, 36, 0, 7), /* CRU_CLKSEL_CON37 */ /* unused */ /* CRU_CLKSEL_CON38 */ MUX(0, "clk_testout2_pll_src", pll_src_cpll_gpll_npll_npll_p, 0, 38, 14, 2), COMP(0, "clk_testout2_c", clk_testout2_p, 0, 38, 8, 5, 13, 1), MUX(0, "clk_testout1_pll_src", pll_src_cpll_gpll_npll_npll_p, 0, 38, 6, 2), COMP(0, "clk_testout1_c", clk_testout1_p, 0, 38, 0, 5, 5, 1), /* CRU_CLKSEL_CON39 */ COMP(0, "aclk_usb3_c", pll_src_cpll_gpll_npll_npll_p, 0, 39, 0, 5, 6, 2), /* CRU_CLKSEL_CON40 */ COMP(0, "clk_usb3otg0_suspend_c", pll_src_p, 0, 40, 0, 10, 15, 1), /* CRU_CLKSEL_CON41 */ COMP(0, "clk_usb3otg1_suspend_c", pll_src_p, 0, 41, 0, 10, 15, 1), /* CRU_CLKSEL_CON42 */ COMP(0, "aclk_hdcp_c", pll_src_cpll_gpll_ppll_p, 0, 42, 8, 5, 14, 2), COMP(0, "aclk_vio_c", pll_src_cpll_gpll_ppll_p, 0, 42, 0, 5, 6, 2), /* CRU_CLKSEL_CON43 */ CDIV(0, "pclk_hdcp_c", "aclk_hdcp", 0, 43, 10, 5), CDIV(0, "hclk_hdcp_c", "aclk_hdcp", 0, 43, 5, 5), CDIV(0, "pclk_vio_c", "aclk_vio", 0, 43, 0, 5), /* CRU_CLKSEL_CON44 */ COMP(0, "pclk_edp_c", pll_src_cpll_gpll_p, 0, 44, 8, 6, 15, 1), /* CRU_CLKSEL_CON45 - XXX clocks in mux are reversed in TRM !!!*/ COMP(0, "clk_hdmi_cec_c", pll_src_p, 0, 45, 0, 10, 15, 1), /* CRU_CLKSEL_CON46 */ COMP(0, "clk_dp_core_c", pll_src_npll_cpll_gpll_p, 0, 46, 0, 5, 6, 2), /* CRU_CLKSEL_CON47 */ CDIV(0, "hclk_vop0_pre_c", "aclk_vop0_pre_c", 0, 47, 8, 5), COMP(0, "aclk_vop0_pre_c", pll_src_vpll_cpll_gpll_npll_p, 0, 47, 0, 5, 6, 2), /* CRU_CLKSEL_CON48 */ CDIV(0, "hclk_vop1_pre_c", "aclk_vop1_pre", 0, 48, 8, 5), COMP(0, "aclk_vop1_pre_c", pll_src_vpll_cpll_gpll_npll_p, 0, 48, 0, 5, 6, 2), /* CRU_CLKSEL_CON49 */ MUX(DCLK_VOP0, "dclk_vop0", dclk_vop0_p, 0, 49, 11, 1), COMP(0, "dclk_vop0_div_c", pll_src_vpll_cpll_gpll_gpll_p, 0, 49, 0, 8, 8, 2), /* CRU_CLKSEL_CON50 */ MUX(DCLK_VOP1, "dclk_vop1", dclk_vop1_p, 0, 50, 11, 1), COMP(0, "dclk_vop1_div_c", 
pll_src_vpll_cpll_gpll_gpll_p, 0, 50, 0, 8, 8, 2), /* CRU_CLKSEL_CON51 */ COMP(0, "clk_vop0_pwm_c", pll_src_vpll_cpll_gpll_gpll_p, 0, 51, 0, 5, 6, 2), /* CRU_CLKSEL_CON52 */ COMP(0, "clk_vop1_pwm_c", pll_src_vpll_cpll_gpll_gpll_p, 0, 52, 0, 5, 6, 2), /* CRU_CLKSEL_CON53 */ CDIV(0, "hclk_isp0_c", "aclk_isp0", 0, 53, 8, 5), COMP(0, "aclk_isp0_c", pll_src_cpll_gpll_ppll_p, 0, 53, 0, 5, 6, 2), /* CRU_CLKSEL_CON54 */ CDIV(0, "hclk_isp1_c", "aclk_isp1", 0, 54, 8, 5), COMP(0, "aclk_isp1_c", pll_src_cpll_gpll_ppll_p, 0, 54, 0, 5, 6, 2), /* CRU_CLKSEL_CON55 */ COMP(0, "clk_isp1_c", pll_src_cpll_gpll_npll_npll_p, 0, 55, 8, 5, 14, 2), COMP(0, "clk_isp0_c", pll_src_cpll_gpll_npll_npll_p, 0, 55, 0, 5, 6, 2), /* CRU_CLKSEL_CON56 */ COMP(0, "aclk_gic_pre_c", pll_src_cpll_gpll_p, 0, 56, 8, 5, 15, 1), MUX(0, "clk_cifout_src_c", pll_src_cpll_gpll_npll_npll_p, 0, 56, 6, 2), COMP(SCLK_CIF_OUT, "clk_cifout", clk_cif_p, 0, 56, 0, 5, 5, 1), /* CRU_CLKSEL_CON57 */ CDIV(0, "clk_test_24m", "xin24m", 0, 57, 6, 10), CDIV(PCLK_ALIVE, "pclk_alive", "gpll", 0, 57, 0, 5), /* CRU_CLKSEL_CON58 */ COMP(0, "clk_spi5_c", pll_src_cpll_gpll_p, 0, 58, 8, 7, 15, 1), MUX(0, "clk_test_pre", pll_src_cpll_gpll_p, 0, 58, 7, 1), CDIV(0, "clk_test_c", "clk_test_pre", 0, 58, 0, 5), /* CRU_CLKSEL_CON59 */ COMP(0, "clk_spi1_c", pll_src_cpll_gpll_p, 0, 59, 8, 7, 15, 1), COMP(0, "clk_spi0_c", pll_src_cpll_gpll_p, 0, 59, 0, 7, 7, 1), /* CRU_CLKSEL_CON60 */ COMP(0, "clk_spi4_c", pll_src_cpll_gpll_p, 0, 60, 8, 7, 15, 1), COMP(0, "clk_spi2_c", pll_src_cpll_gpll_p, 0, 60, 0, 7, 7, 1), /* CRU_CLKSEL_CON61 */ COMP(0, "clk_i2c5_c", pll_src_cpll_gpll_p, 0, 61, 8, 7, 15, 1), COMP(0, "clk_i2c1_c", pll_src_cpll_gpll_p, 0, 61, 0, 7, 7, 1), /* CRU_CLKSEL_CON62 */ COMP(0, "clk_i2c6_c", pll_src_cpll_gpll_p, 0, 62, 8, 7, 15, 1), COMP(0, "clk_i2c2_c", pll_src_cpll_gpll_p, 0, 62, 0, 7, 7, 1), /* CRU_CLKSEL_CON63 */ COMP(0, "clk_i2c7_c", pll_src_cpll_gpll_p, 0, 63, 8, 7, 15, 1), COMP(0, "clk_i2c3_c", pll_src_cpll_gpll_p, 0, 63, 0, 7, 7, 
1),
	/* CRU_CLKSEL_CON64 */
	COMP(0, "clk_uphy0_tcpdphy_ref_c", pll_src_p, 0, 64, 8, 5, 15, 1),
	COMP(0, "clk_uphy0_tcpdcore_c", pll_src_24m_32k_cpll_gpll_p,
	    0, 64, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON65 */
	COMP(0, "clk_uphy1_tcpdphy_ref_c", pll_src_p, 0, 65, 8, 5, 15, 1),
	COMP(0, "clk_uphy1_tcpdcore_c", pll_src_24m_32k_cpll_gpll_p,
	    0, 65, 0, 5, 6, 2),
	/* CRU_CLKSEL_CON99 - 107 */
	FRACT(0, "clk_spdif_frac_c", "clk_spdif_div", 0, 99),
	FRACT(0, "clk_i2s0_frac_c", "clk_i2s0_div", 0, 96),
	FRACT(0, "clk_i2s1_frac_c", "clk_i2s1_div", 0, 97),
	FRACT(0, "clk_i2s2_frac_c", "clk_i2s2_div", 0, 98),
	FRACT(0, "clk_uart0_frac_c", "clk_uart0_div", 0, 100),
	FRACT(0, "clk_uart1_frac_c", "clk_uart1_div", 0, 101),
	FRACT(0, "clk_uart2_frac_c", "clk_uart2_div", 0, 102),
	FRACT(0, "clk_uart3_frac_c", "clk_uart3_div", 0, 103),
	FRACT(0, "clk_test_frac_c", "clk_test_pre", 0, 105),
	FRACT(DCLK_VOP0_FRAC, "dclk_vop0_frac", "dclk_vop0_div", 0, 106),
	FRACT(DCLK_VOP1_FRAC, "dclk_vop1_frac", "dclk_vop1_div", 0, 107),

	/*
	 * This clock is controlled in the secure world
	 */
	FFACT(PCLK_WDT, "pclk_wdt", "pclk_alive", 1, 1),

	/*
	 * Not yet implemented:
	 * MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3399_SDMMC_CON0, 1),
	 * MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RK3399_SDMMC_CON1, 1),
	 * MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3399_SDIO_CON0, 1),
	 * MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RK3399_SDIO_CON1, 1),
	 */
};

/*
 * Probe: match the FDT "rockchip,rk3399-cru" compatible string;
 * returns ENXIO for anything else.
 */
static int
rk3399_cru_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "rockchip,rk3399-cru")) {
		device_set_desc(dev, "Rockchip RK3399 Clock and Reset Unit");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Attach: fill the shared rk_cru softc with this SoC's gate/clock
 * tables and soft-reset register block (335 resets at offset 0x400),
 * then delegate to the common rk_cru attach code.
 */
static int
rk3399_cru_attach(device_t dev)
{
	struct rk_cru_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->gates = rk3399_gates;
	sc->ngates = nitems(rk3399_gates);
	sc->clks = rk3399_clks;
	sc->nclks = nitems(rk3399_clks);
	sc->reset_offset = 0x400;
	sc->reset_num = 335;

	return (rk_cru_attach(dev));
}

static
device_method_t rk3399_cru_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rk3399_cru_probe),
	DEVMETHOD(device_attach,	rk3399_cru_attach),

	DEVMETHOD_END
};

/* Inherit everything else (clkdev/hwreset methods) from rk_cru_driver. */
DEFINE_CLASS_1(rk3399_cru, rk3399_cru_driver, rk3399_cru_methods,
    sizeof(struct rk_cru_softc), rk_cru_driver);

EARLY_DRIVER_MODULE(rk3399_cru, simplebus, rk3399_cru_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/dev/clk/rockchip/rk3399_pmucru.c b/sys/dev/clk/rockchip/rk3399_pmucru.c
index 706bf444aaf2..2239722849c0 100644
--- a/sys/dev/clk/rockchip/rk3399_pmucru.c
+++ b/sys/dev/clk/rockchip/rk3399_pmucru.c
@@ -1,873 +1,873 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Emmanuel Vadot
 * Copyright (c) 2018 Val Packett
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #define CRU_CLKSEL_CON(x) (0x80 + (x) * 0x4) #define CRU_CLKGATE_CON(x) (0x100 + (x) * 0x4) #define PLL_PPLL 1 #define SCLK_32K_SUSPEND_PMU 2 #define SCLK_SPI3_PMU 3 #define SCLK_TIMER12_PMU 4 #define SCLK_TIMER13_PMU 5 #define SCLK_UART4_PMU 6 #define SCLK_PVTM_PMU 7 #define SCLK_WIFI_PMU 8 #define SCLK_I2C0_PMU 9 #define SCLK_I2C4_PMU 10 #define SCLK_I2C8_PMU 11 #define PCLK_PMU_SRC 19 #define PCLK_PMU 20 #define PCLK_PMUGRF_PMU 21 #define PCLK_INTMEM1_PMU 22 #define PCLK_GPIO0_PMU 23 #define PCLK_GPIO1_PMU 24 #define PCLK_SGRF_PMU 25 #define PCLK_NOC_PMU 26 #define PCLK_I2C0_PMU 27 #define PCLK_I2C4_PMU 28 #define PCLK_I2C8_PMU 29 #define PCLK_RKPWM_PMU 30 #define PCLK_SPI3_PMU 31 #define PCLK_TIMER_PMU 32 #define PCLK_MAILBOX_PMU 33 #define PCLK_UART4_PMU 34 #define PCLK_WDT_M0_PMU 35 #define FCLK_CM0S_SRC_PMU 44 #define FCLK_CM0S_PMU 45 #define SCLK_CM0S_PMU 46 #define HCLK_CM0S_PMU 47 #define DCLK_CM0S_PMU 48 #define PCLK_INTR_ARB_PMU 49 #define HCLK_NOC_PMU 50 /* GATES */ static struct rk_cru_gate rk3399_pmu_gates[] = { /* PMUCRU_CLKGATE_CON0 */ /* 0 Reserved */ /* 1 fclk_cm0s_pmu_ppll_src_en */ GATE(SCLK_SPI3_PMU, "clk_spi3_pmu", "clk_spi3_c", 0, 2), GATE(SCLK_TIMER12_PMU, "clk_timer0_pmu", "clk_timer_sel", 0, 3), GATE(SCLK_TIMER13_PMU, "clk_timer1_pmu", "clk_timer_sel", 0, 4), GATE(SCLK_UART4_PMU, "clk_uart4_pmu", "clk_uart4_sel", 0, 5), GATE(0, "clk_uart4_frac", "clk_uart4_frac_frac", 0, 6), /* 7 clk_pvtm_pmu_en */ GATE(SCLK_WIFI_PMU, "clk_wifi_pmu", "clk_wifi_sel", 0, 8), GATE(SCLK_I2C0_PMU, "clk_i2c0_src", "clk_i2c0_div", 0, 9), GATE(SCLK_I2C4_PMU, "clk_i2c4_src", "clk_i2c4_div", 0, 10), GATE(SCLK_I2C8_PMU, "clk_i2c8_src", "clk_i2c8_div", 0, 11), /* 12:15 Reserved */ /* PMUCRU_CLKGATE_CON1 */ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_src", 1, 0), /* 1 pclk_pmugrf_en */ /* 2 pclk_intmem1_en */ 
GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_src", 1, 3), GATE(PCLK_GPIO1_PMU, "pclk_gpio1_pmu", "pclk_pmu_src", 1, 4), /* 5 pclk_sgrf_en */ /* 6 pclk_noc_pmu_en */ GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pclk_pmu_src", 1, 7), GATE(PCLK_I2C4_PMU, "pclk_i2c4_pmu", "pclk_pmu_src", 1, 8), GATE(PCLK_I2C8_PMU, "pclk_i2c8_pmu", "pclk_pmu_src", 1, 9), GATE(PCLK_RKPWM_PMU, "pclk_rkpwm_pmu", "pclk_pmu_src", 1, 10), GATE(PCLK_SPI3_PMU, "pclk_spi3_pmu", "pclk_pmu_src", 1, 11), GATE(PCLK_TIMER_PMU, "pclk_timer_pmu", "pclk_pmu_src", 1, 12), GATE(PCLK_MAILBOX_PMU, "pclk_mailbox_pmu", "pclk_pmu_src", 1, 13), /* 14 pclk_uartm0_en */ /* 15 pclk_wdt_m0_pmu_en */ /* PMUCRU_CLKGATE_CON2 */ /* 0 fclk_cm0s_en */ /* 1 sclk_cm0s_en */ /* 2 hclk_cm0s_en */ /* 3 dclk_cm0s_en */ /* 4 Reserved */ /* 5 hclk_noc_pmu_en */ /* 6:15 Reserved */ }; /* * PLLs */ static struct rk_clk_pll_rate rk3399_pll_rates[] = { { .freq = 2208000000, .refdiv = 1, .fbdiv = 92, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2184000000, .refdiv = 1, .fbdiv = 91, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2160000000, .refdiv = 1, .fbdiv = 90, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2136000000, .refdiv = 1, .fbdiv = 89, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2112000000, .refdiv = 1, .fbdiv = 88, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2088000000, .refdiv = 1, .fbdiv = 87, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2064000000, .refdiv = 1, .fbdiv = 86, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2040000000, .refdiv = 1, .fbdiv = 85, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 2016000000, .refdiv = 1, .fbdiv = 84, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1992000000, .refdiv = 1, .fbdiv = 83, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1968000000, .refdiv = 1, .fbdiv = 82, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1944000000, .refdiv = 1, .fbdiv = 81, .postdiv1 = 1, .postdiv2 = 1, 
.dsmpd = 1, }, { .freq = 1920000000, .refdiv = 1, .fbdiv = 80, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1896000000, .refdiv = 1, .fbdiv = 79, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1872000000, .refdiv = 1, .fbdiv = 78, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1848000000, .refdiv = 1, .fbdiv = 77, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1824000000, .refdiv = 1, .fbdiv = 76, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1800000000, .refdiv = 1, .fbdiv = 75, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1776000000, .refdiv = 1, .fbdiv = 74, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1752000000, .refdiv = 1, .fbdiv = 73, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1728000000, .refdiv = 1, .fbdiv = 72, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1704000000, .refdiv = 1, .fbdiv = 71, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1680000000, .refdiv = 1, .fbdiv = 70, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1656000000, .refdiv = 1, .fbdiv = 69, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1632000000, .refdiv = 1, .fbdiv = 68, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1608000000, .refdiv = 1, .fbdiv = 67, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1600000000, .refdiv = 3, .fbdiv = 200, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1584000000, .refdiv = 1, .fbdiv = 66, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1560000000, .refdiv = 1, .fbdiv = 65, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1536000000, .refdiv = 1, .fbdiv = 64, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1512000000, .refdiv = 1, .fbdiv = 63, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1488000000, .refdiv = 1, .fbdiv = 62, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1464000000, .refdiv = 1, .fbdiv = 61, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1440000000, .refdiv 
= 1, .fbdiv = 60, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1416000000, .refdiv = 1, .fbdiv = 59, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1392000000, .refdiv = 1, .fbdiv = 58, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1368000000, .refdiv = 1, .fbdiv = 57, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1344000000, .refdiv = 1, .fbdiv = 56, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1320000000, .refdiv = 1, .fbdiv = 55, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1296000000, .refdiv = 1, .fbdiv = 54, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1272000000, .refdiv = 1, .fbdiv = 53, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1248000000, .refdiv = 1, .fbdiv = 52, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1200000000, .refdiv = 1, .fbdiv = 50, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1188000000, .refdiv = 2, .fbdiv = 99, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1104000000, .refdiv = 1, .fbdiv = 46, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1100000000, .refdiv = 12, .fbdiv = 550, .postdiv1 = 1, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1008000000, .refdiv = 1, .fbdiv = 84, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 1000000000, .refdiv = 1, .fbdiv = 125, .postdiv1 = 3, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 984000000, .refdiv = 1, .fbdiv = 82, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 960000000, .refdiv = 1, .fbdiv = 80, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 936000000, .refdiv = 1, .fbdiv = 78, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 912000000, .refdiv = 1, .fbdiv = 76, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 900000000, .refdiv = 4, .fbdiv = 300, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 888000000, .refdiv = 1, .fbdiv = 74, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 864000000, .refdiv = 1, .fbdiv = 72, .postdiv1 = 2, .postdiv2 = 1, 
.dsmpd = 1, }, { .freq = 840000000, .refdiv = 1, .fbdiv = 70, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 816000000, .refdiv = 1, .fbdiv = 68, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 800000000, .refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 700000000, .refdiv = 6, .fbdiv = 350, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 696000000, .refdiv = 1, .fbdiv = 58, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 676000000, .refdiv = 3, .fbdiv = 169, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 600000000, .refdiv = 1, .fbdiv = 75, .postdiv1 = 3, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 594000000, .refdiv = 1, .fbdiv = 99, .postdiv1 = 4, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 533250000, .refdiv = 8, .fbdiv = 711, .postdiv1 = 4, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 504000000, .refdiv = 1, .fbdiv = 63, .postdiv1 = 3, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 500000000, .refdiv = 6, .fbdiv = 250, .postdiv1 = 2, .postdiv2 = 1, .dsmpd = 1, }, { .freq = 408000000, .refdiv = 1, .fbdiv = 68, .postdiv1 = 2, .postdiv2 = 2, .dsmpd = 1, }, { .freq = 312000000, .refdiv = 1, .fbdiv = 52, .postdiv1 = 2, .postdiv2 = 2, .dsmpd = 1, }, { .freq = 297000000, .refdiv = 1, .fbdiv = 99, .postdiv1 = 4, .postdiv2 = 2, .dsmpd = 1, }, { .freq = 216000000, .refdiv = 1, .fbdiv = 72, .postdiv1 = 4, .postdiv2 = 2, .dsmpd = 1, }, { .freq = 148500000, .refdiv = 1, .fbdiv = 99, .postdiv1 = 4, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 106500000, .refdiv = 1, .fbdiv = 71, .postdiv1 = 4, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 96000000, .refdiv = 1, .fbdiv = 64, .postdiv1 = 4, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 74250000, .refdiv = 2, .fbdiv = 99, .postdiv1 = 4, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 65000000, .refdiv = 1, .fbdiv = 65, .postdiv1 = 6, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 54000000, .refdiv = 1, .fbdiv = 54, .postdiv1 = 6, .postdiv2 = 4, .dsmpd = 1, }, { .freq = 27000000, .refdiv = 1, .fbdiv = 27, 
.postdiv1 = 6, .postdiv2 = 4, .dsmpd = 1, },
	{},
};

PLIST(xin24m_p) = {"xin24m"};
PLIST(xin24m_xin32k_p) = {"xin24m", "xin32k"};
PLIST(xin24m_ppll_p) = {"xin24m", "ppll"};
PLIST(uart4_p) = {"clk_uart4_c", "clk_uart4_frac", "xin24m"};
PLIST(wifi_p) = {"clk_wifi_c", "clk_wifi_frac"};

/* The PMU CRU's PLL (PPLL), parented on xin24m, register bank at 0x00. */
static struct rk_clk_pll_def ppll = {
	.clkdef = {
		.id = PLL_PPLL,
		.name = "ppll",
		.parent_names = xin24m_p,
		.parent_cnt = nitems(xin24m_p),
	},
	.base_offset = 0x00,
	.rates = rk3399_pll_rates,
};

static struct rk_clk rk3399_pmu_clks[] = {
	/* Linked clocks */
	LINK("xin32k"),

	{ .type = RK3399_CLK_PLL, .clk.pll = &ppll },

	/* PMUCRU_CLKSEL_CON0 */
	CDIV(PCLK_PMU_SRC, "pclk_pmu_src", "ppll", 0, 0, 0, 5),
	/* 5:7 Reserved */
	/* 8:12 cm0s_div */
	/* 13:14 Reserved */
	/* 15 cm0s_clk_pll_sel */

	/* PMUCRU_CLKSEL_CON1 */
	COMP(0, "clk_spi3_c", xin24m_ppll_p, 0, 1, 0, 7, 7, 1),
	COMP(0, "clk_wifi_c", xin24m_ppll_p, 0, 1, 8, 5, 13, 1),
	MUX(0, "clk_wifi_sel", wifi_p, 0, 1, 14, 1),
	MUX(0, "clk_timer_sel", xin24m_xin32k_p, 0, 1, 15, 1),

	/* PMUCRU_CLKSEL_CON2 */
	CDIV(0, "clk_i2c0_div", "ppll", 0, 2, 0, 7),
	/* 7 Reserved */
	CDIV(0, "clk_i2c8_div", "ppll", 0, 2, 8, 7),
	/* 15 Reserved */

	/* PMUCRU_CLKSEL_CON3 */
	CDIV(0, "clk_i2c4_div", "ppll", 0, 3, 0, 7),
	/* 7:15 Reserved */

	/* PMUCRU_CLKSEL_CON4 */
	/* 0:9 clk_32k_suspend_div */
	/* 10:14 Reserved */
	/* 15 clk_32k_suspend_sel */

	/* PMUCRU_CLKSEL_CON5 */
	COMP(0, "clk_uart4_c", xin24m_ppll_p, 0, 5, 0, 7, 10, 1),
	/* 7 Reserved */
	MUX(0, "clk_uart4_sel", uart4_p, 0, 5, 8, 2),
	/* 11:15 Reserved */

	/* PMUCRU_CLKFRAC_CON0 / PMUCRU_CLKSEL_CON6 */
	FRACT(0, "clk_uart4_frac_frac", "clk_uart4_sel", 0, 6),

	/* PMUCRU_CLKFRAC_CON1 / PMUCRU_CLKSEL_CON7 */
	FRACT(0, "clk_wifi_frac", "clk_wifi_c", 0, 7),
};

/*
 * Probe: match the FDT "rockchip,rk3399-pmucru" compatible string;
 * returns ENXIO for anything else.
 */
static int
rk3399_pmucru_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "rockchip,rk3399-pmucru")) {
		device_set_desc(dev,
		    "Rockchip RK3399 PMU Clock and Reset Unit");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
rk3399_pmucru_attach(device_t dev)
{
	struct rk_cru_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->gates = rk3399_pmu_gates;
	sc->ngates = nitems(rk3399_pmu_gates);
	sc->clks = rk3399_pmu_clks;
	sc->nclks = nitems(rk3399_pmu_clks);
	/* Soft-reset registers: 30 resets starting at offset 0x110. */
	sc->reset_offset = 0x110;
	sc->reset_num = 30;

	return (rk_cru_attach(dev));
}

static device_method_t rk3399_pmucru_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rk3399_pmucru_probe),
	DEVMETHOD(device_attach,	rk3399_pmucru_attach),

	DEVMETHOD_END
};

/* Inherit the remaining methods from the generic rk_cru_driver. */
DEFINE_CLASS_1(rk3399_pmucru, rk3399_pmucru_driver, rk3399_pmucru_methods,
    sizeof(struct rk_cru_softc), rk_cru_driver);

EARLY_DRIVER_MODULE(rk3399_pmucru, simplebus, rk3399_pmucru_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
diff --git a/sys/dev/clk/rockchip/rk3568_cru.c b/sys/dev/clk/rockchip/rk3568_cru.c
index 4d7f569a3de2..5f6eb766b4ee 100644
--- a/sys/dev/clk/rockchip/rk3568_cru.c
+++ b/sys/dev/clk/rockchip/rk3568_cru.c
@@ -1,1439 +1,1439 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021, 2022 Soren Schmidt
 * Copyright (c) 2023, Emmanuel Vadot
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #define RK3568_PLLSEL_CON(x) ((x) * 0x20) #define CRU_CLKSEL_CON(x) ((x) * 0x4 + 0x100) #define CRU_CLKGATE_CON(x) ((x) * 0x4 + 0x300) #define RK3568_SOFTRST_CON(x) ((x) * 0x4 + 0x400) #define RK_PLLRATE(_hz, _ref, _fb, _post1, _post2, _dspd) \ { \ .freq = _hz, \ .refdiv = _ref, \ .fbdiv = _fb, \ .postdiv1 = _post1, \ .postdiv2 = _post2, \ .dsmpd = _dspd, \ } /* PLL clock */ #define RK_PLL(_id, _name, _pnames, _off, _shift) \ { \ .type = RK3328_CLK_PLL, \ .clk.pll = &(struct rk_clk_pll_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pnames, \ .clkdef.parent_cnt = nitems(_pnames), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_offset = RK3568_PLLSEL_CON(_off), \ .mode_reg = 0xc0, \ .mode_shift = _shift, \ .rates = rk3568_pll_rates, \ }, \ } struct rk_clk_pll_rate rk3568_pll_rates[] = { /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd */ RK_PLLRATE(2208000000, 1, 92, 1, 1, 1), RK_PLLRATE(2184000000, 1, 91, 1, 1, 1), RK_PLLRATE(2160000000, 1, 90, 1, 1, 1), RK_PLLRATE(2088000000, 1, 87, 1, 1, 1), RK_PLLRATE(2064000000, 1, 86, 1, 1, 1), RK_PLLRATE(2040000000, 1, 85, 1, 1, 1), RK_PLLRATE(2016000000, 1, 84, 1, 1, 1), RK_PLLRATE(1992000000, 1, 83, 1, 1, 1), RK_PLLRATE(1920000000, 1, 80, 1, 1, 1), RK_PLLRATE(1896000000, 1, 79, 1, 1, 1), RK_PLLRATE(1800000000, 1, 75, 1, 1, 
1), RK_PLLRATE(1704000000, 1, 71, 1, 1, 1), RK_PLLRATE(1608000000, 1, 67, 1, 1, 1), RK_PLLRATE(1600000000, 3, 200, 1, 1, 1), RK_PLLRATE(1584000000, 1, 132, 2, 1, 1), RK_PLLRATE(1560000000, 1, 130, 2, 1, 1), RK_PLLRATE(1536000000, 1, 128, 2, 1, 1), RK_PLLRATE(1512000000, 1, 126, 2, 1, 1), RK_PLLRATE(1488000000, 1, 124, 2, 1, 1), RK_PLLRATE(1464000000, 1, 122, 2, 1, 1), RK_PLLRATE(1440000000, 1, 120, 2, 1, 1), RK_PLLRATE(1416000000, 1, 118, 2, 1, 1), RK_PLLRATE(1400000000, 3, 350, 2, 1, 1), RK_PLLRATE(1392000000, 1, 116, 2, 1, 1), RK_PLLRATE(1368000000, 1, 114, 2, 1, 1), RK_PLLRATE(1344000000, 1, 112, 2, 1, 1), RK_PLLRATE(1320000000, 1, 110, 2, 1, 1), RK_PLLRATE(1296000000, 1, 108, 2, 1, 1), RK_PLLRATE(1272000000, 1, 106, 2, 1, 1), RK_PLLRATE(1248000000, 1, 104, 2, 1, 1), RK_PLLRATE(1200000000, 1, 100, 2, 1, 1), RK_PLLRATE(1188000000, 1, 99, 2, 1, 1), RK_PLLRATE(1104000000, 1, 92, 2, 1, 1), RK_PLLRATE(1100000000, 3, 275, 2, 1, 1), RK_PLLRATE(1008000000, 1, 84, 2, 1, 1), RK_PLLRATE(1000000000, 3, 250, 2, 1, 1), RK_PLLRATE(912000000, 1, 76, 2, 1, 1), RK_PLLRATE(816000000, 1, 68, 2, 1, 1), RK_PLLRATE(800000000, 3, 200, 2, 1, 1), RK_PLLRATE(700000000, 3, 350, 4, 1, 1), RK_PLLRATE(696000000, 1, 116, 4, 1, 1), RK_PLLRATE(600000000, 1, 100, 4, 1, 1), RK_PLLRATE(594000000, 1, 99, 4, 1, 1), RK_PLLRATE(500000000, 1, 125, 6, 1, 1), RK_PLLRATE(408000000, 1, 68, 2, 2, 1), RK_PLLRATE(312000000, 1, 78, 6, 1, 1), RK_PLLRATE(216000000, 1, 72, 4, 2, 1), RK_PLLRATE(200000000, 1, 100, 3, 4, 1), RK_PLLRATE(148500000, 1, 99, 4, 4, 1), RK_PLLRATE(100000000, 1, 150, 6, 6, 1), RK_PLLRATE(96000000, 1, 96, 6, 4, 1), RK_PLLRATE(74250000, 2, 99, 4, 4, 1), {}, }; static struct rk_clk_armclk_rates rk3568_armclk_rates[] = { {2208000000, 1}, {2160000000, 1}, {2064000000, 1}, {2016000000, 1}, {1992000000, 1}, {1800000000, 1}, {1704000000, 1}, {1608000000, 1}, {1512000000, 1}, {1488000000, 1}, {1416000000, 1}, {1200000000, 1}, {1104000000, 1}, {1008000000, 1}, { 816000000, 1}, { 696000000, 1}, { 
600000000, 1}, { 408000000, 1}, { 312000000, 1}, { 216000000, 1}, { 96000000, 1}, {}, }; /* Parent clock defines */ PLIST(mux_pll_p) = { "xin24m" }; PLIST(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc_32k" }; PLIST(mux_armclk_p) = { "apll", "gpll" }; PLIST(clk_i2s0_8ch_tx_p) = { "clk_i2s0_8ch_tx_src", "clk_i2s0_8ch_tx_frac", "i2s0_mclkin", "xin_osc0_half" }; PLIST(clk_i2s0_8ch_rx_p) = { "clk_i2s0_8ch_rx_src", "clk_i2s0_8ch_rx_frac", "i2s0_mclkin", "xin_osc0_half" }; PLIST(clk_i2s1_8ch_tx_p) = { "clk_i2s1_8ch_tx_src", "clk_i2s1_8ch_tx_frac", "i2s1_mclkin", "xin_osc0_half" }; PLIST(clk_i2s1_8ch_rx_p) = { "clk_i2s1_8ch_rx_src", "clk_i2s1_8ch_rx_frac", "i2s1_mclkin", "xin_osc0_half" }; PLIST(clk_i2s2_2ch_p) = { "clk_i2s2_2ch_src", "clk_i2s2_2ch_frac", "i2s2_mclkin", "xin_osc0_half"}; PLIST(clk_i2s3_2ch_tx_p) = { "clk_i2s3_2ch_tx_src", "clk_i2s3_2ch_tx_frac", "i2s3_mclkin", "xin_osc0_half" }; PLIST(clk_i2s3_2ch_rx_p) = { "clk_i2s3_2ch_rx_src", "clk_i2s3_2ch_rx_frac", "i2s3_mclkin", "xin_osc0_half" }; PLIST(mclk_spdif_8ch_p) = { "mclk_spdif_8ch_src", "mclk_spdif_8ch_frac" }; PLIST(sclk_audpwm_p) = { "sclk_audpwm_src", "sclk_audpwm_frac" }; PLIST(sclk_uart1_p) = { "clk_uart1_src", "clk_uart1_frac", "xin24m" }; PLIST(sclk_uart2_p) = { "clk_uart2_src", "clk_uart2_frac", "xin24m" }; PLIST(sclk_uart3_p) = { "clk_uart3_src", "clk_uart3_frac", "xin24m" }; PLIST(sclk_uart4_p) = { "clk_uart4_src", "clk_uart4_frac", "xin24m" }; PLIST(sclk_uart5_p) = { "clk_uart5_src", "clk_uart5_frac", "xin24m" }; PLIST(sclk_uart6_p) = { "clk_uart6_src", "clk_uart6_frac", "xin24m" }; PLIST(sclk_uart7_p) = { "clk_uart7_src", "clk_uart7_frac", "xin24m" }; PLIST(sclk_uart8_p) = { "clk_uart8_src", "clk_uart8_frac", "xin24m" }; PLIST(sclk_uart9_p) = { "clk_uart9_src", "clk_uart9_frac", "xin24m" }; PLIST(mpll_gpll_cpll_npll_p) = { "mpll", "gpll", "cpll", "npll" }; PLIST(gpll_cpll_npll_p) = { "gpll", "cpll", "npll" }; PLIST(npll_gpll_p) = { "npll", "gpll" }; PLIST(cpll_gpll_p) = { "cpll", "gpll" 
}; PLIST(gpll_cpll_p) = { "gpll", "cpll" }; PLIST(gpll_cpll_npll_vpll_p) = { "gpll", "cpll", "npll", "vpll" }; PLIST(apll_gpll_npll_p) = { "apll", "gpll", "npll" }; PLIST(sclk_core_pre_p) = { "sclk_core_src", "npll" }; PLIST(gpll150_gpll100_gpll75_xin24m_p) = { "clk_gpll_div_150m", "clk_gpll_div_100m", "clk_gpll_div_75m", "xin24m" }; PLIST(clk_gpu_pre_mux_p) = { "clk_gpu_src", "gpu_pvtpll_out" }; PLIST(clk_npu_pre_ndft_p) = { "clk_npu_src", "clk_npu_np5"}; PLIST(clk_npu_p) = { "clk_npu_pre_ndft", "npu_pvtpll_out" }; PLIST(dpll_gpll_cpll_p) = { "dpll", "gpll", "cpll" }; PLIST(clk_ddr1x_p) = { "clk_ddrphy1x_src", "dpll" }; PLIST(gpll200_gpll150_gpll100_xin24m_p) = { "clk_gpll_div_200m", "clk_gpll_div_150m", "clk_gpll_div_100m", "xin24m" }; PLIST(gpll100_gpll75_gpll50_p) = { "clk_gpll_div_100m", "clk_gpll_div_75m", "clk_cpll_div_50m" }; PLIST(i2s0_mclkout_tx_p) = { "clk_i2s0_8ch_tx", "xin_osc0_half" }; PLIST(i2s0_mclkout_rx_p) = { "clk_i2s0_8ch_rx", "xin_osc0_half" }; PLIST(i2s1_mclkout_tx_p) = { "clk_i2s1_8ch_tx", "xin_osc0_half" }; PLIST(i2s1_mclkout_rx_p) = { "clk_i2s1_8ch_rx", "xin_osc0_half" }; PLIST(i2s2_mclkout_p) = { "clk_i2s2_2ch", "xin_osc0_half" }; PLIST(i2s3_mclkout_tx_p) = { "clk_i2s3_2ch_tx", "xin_osc0_half" }; PLIST(i2s3_mclkout_rx_p) = { "clk_i2s3_2ch_rx", "xin_osc0_half" }; PLIST(mclk_pdm_p) = { "clk_gpll_div_300m", "clk_cpll_div_250m", "clk_gpll_div_200m", "clk_gpll_div_100m" }; PLIST(clk_i2c_p) = { "clk_gpll_div_200m", "clk_gpll_div_100m", "xin24m", "clk_cpll_div_100m" }; PLIST(gpll200_gpll150_gpll100_p) = { "clk_gpll_div_200m", "clk_gpll_div_150m", "clk_gpll_div_100m" }; PLIST(gpll300_gpll200_gpll100_p) = { "clk_gpll_div_300m", "clk_gpll_div_200m", "clk_gpll_div_100m" }; PLIST(clk_nandc_p) = { "clk_gpll_div_200m", "clk_gpll_div_150m", "clk_cpll_div_100m", "xin24m" }; PLIST(sclk_sfc_p) = { "xin24m", "clk_cpll_div_50m", "clk_gpll_div_75m", "clk_gpll_div_100m", "clk_cpll_div_125m", "clk_gpll_div_150m" }; PLIST(gpll200_gpll150_cpll125_p) = { 
"clk_gpll_div_200m", "clk_gpll_div_150m", "clk_cpll_div_125m" }; PLIST(cclk_emmc_p) = { "xin24m", "clk_gpll_div_200m", "clk_gpll_div_150m", "clk_cpll_div_100m", "clk_cpll_div_50m", "clk_osc0_div_375k" }; PLIST(aclk_pipe_p) = { "clk_gpll_div_400m", "clk_gpll_div_300m", "clk_gpll_div_200m", "xin24m" }; PLIST(gpll200_cpll125_p) = { "clk_gpll_div_200m", "clk_cpll_div_125m" }; PLIST(gpll300_gpll200_gpll100_xin24m_p) = { "clk_gpll_div_300m", "clk_gpll_div_200m", "clk_gpll_div_100m", "xin24m" }; PLIST(clk_sdmmc_p) = { "xin24m", "clk_gpll_div_400m", "clk_gpll_div_300m", "clk_cpll_div_100m", "clk_cpll_div_50m", "clk_osc0_div_750k" }; PLIST(cpll125_cpll50_cpll25_xin24m_p) = { "clk_cpll_div_125m", "clk_cpll_div_50m", "clk_cpll_div_25m", "xin24m" }; PLIST(clk_gmac_ptp_p) = { "clk_cpll_div_62P5m", "clk_gpll_div_100m", "clk_cpll_div_50m", "xin24m" }; PLIST(cpll333_gpll300_gpll200_p) = { "clk_cpll_div_333m", "clk_gpll_div_300m", "clk_gpll_div_200m" }; PLIST(cpll_gpll_hpll_p) = { "cpll", "gpll", "hpll" }; PLIST(gpll_usb480m_xin24m_p) = { "gpll", "usb480m", "xin24m", "xin24m" }; PLIST(gpll300_cpll250_gpll100_xin24m_p) = { "clk_gpll_div_300m", "clk_cpll_div_250m", "clk_gpll_div_100m", "xin24m" }; PLIST(cpll_gpll_hpll_vpll_p) = { "cpll", "gpll", "hpll", "vpll" }; PLIST(hpll_vpll_gpll_cpll_p) = { "hpll", "vpll", "gpll", "cpll" }; PLIST(gpll400_cpll333_gpll200_p) = { "clk_gpll_div_400m", "clk_cpll_div_333m", "clk_gpll_div_200m" }; PLIST(gpll100_gpll75_cpll50_xin24m_p) = { "clk_gpll_div_100m", "clk_gpll_div_75m", "clk_cpll_div_50m", "xin24m" }; PLIST(xin24m_gpll100_cpll100_p) = { "xin24m", "clk_gpll_div_100m", "clk_cpll_div_100m" }; PLIST(gpll_cpll_usb480m_p) = { "gpll", "cpll", "usb480m" }; PLIST(gpll100_xin24m_cpll100_p) = { "clk_gpll_div_100m", "xin24m", "clk_cpll_div_100m" }; PLIST(gpll200_xin24m_cpll100_p) = { "clk_gpll_div_200m", "xin24m", "clk_cpll_div_100m" }; PLIST(xin24m_32k_p) = { "xin24m", "clk_rtc_32k" }; PLIST(cpll500_gpll400_gpll300_xin24m_p) = { "clk_cpll_div_500m", 
"clk_gpll_div_400m", "clk_gpll_div_300m", "xin24m" }; PLIST(gpll400_gpll300_gpll200_xin24m_p) = { "clk_gpll_div_400m", "clk_gpll_div_300m", "clk_gpll_div_200m", "xin24m" }; PLIST(xin24m_cpll100_p) = { "xin24m", "clk_cpll_div_100m" }; PLIST(mux_gmac0_p) = { "clk_mac0_2top", "gmac0_clkin" }; PLIST(mux_gmac0_rgmii_speed_p) = { "clk_gmac0", "clk_gmac0", "clk_gmac0_tx_div50", "clk_gmac0_tx_div5" }; PLIST(mux_gmac0_rmii_speed_p) = { "clk_gmac0_rx_div20", "clk_gmac0_rx_div2" }; PLIST(mux_gmac0_rx_tx_p) = { "clk_gmac0_rgmii_speed", "clk_gmac0_rmii_speed", "clk_gmac0_xpcs_mii" }; PLIST(mux_gmac1_p) = { "clk_mac1_2top", "gmac1_clkin" }; PLIST(mux_gmac1_rgmii_speed_p) = { "clk_gmac1", "clk_gmac1", "clk_gmac1_tx_div50", "clk_gmac1_tx_div5" }; PLIST(mux_gmac1_rmii_speed_p) = { "clk_gmac1_rx_div20", "clk_gmac1_rx_div2" }; PLIST(mux_gmac1_rx_tx_p) = { "clk_gmac1_rgmii_speed", "clk_gmac1_rmii_speed", "clk_gmac1_xpcs_mii" }; PLIST(clk_mac_2top_p) = { "clk_cpll_div_125m", "clk_cpll_div_50m", "clk_cpll_div_25m", "ppll" }; PLIST(aclk_rkvdec_pre_p) = { "gpll", "cpll" }; PLIST(clk_rkvdec_core_p) = { "gpll", "cpll", "npll", "vpll" }; /* CLOCKS */ static struct rk_clk rk3568_clks[] = { /* External clocks */ LINK("xin24m"), LINK("clk_rtc_32k"), LINK("usb480m_phy"), LINK("mpll"), /* It lives in SCRU */ LINK("i2s0_mclkin"), LINK("i2s1_mclkin"), LINK("i2s2_mclkin"), LINK("i2s3_mclkin"), LINK("gpu_pvtpll_out"), LINK("npu_pvtpll_out"), LINK("gmac0_clkin"), LINK("gmac1_clkin"), LINK("clk_gmac0_xpcs_mii"), LINK("clk_gmac1_xpcs_mii"), LINK("dummy"), /* PLL's */ RK_PLL(PLL_APLL, "apll", mux_pll_p, 0, 0), RK_PLL(PLL_DPLL, "dpll", mux_pll_p, 1, 2), RK_PLL(PLL_GPLL, "gpll", mux_pll_p, 2, 6), RK_PLL(PLL_CPLL, "cpll", mux_pll_p, 3, 4), RK_PLL(PLL_NPLL, "npll", mux_pll_p, 4, 10), RK_PLL(PLL_VPLL, "vpll", mux_pll_p, 5, 12), ARMDIV(ARMCLK, "armclk", mux_armclk_p, rk3568_armclk_rates, 0, 0, 5, 6, 1, 0, 1), FFACT(0, "clk_osc0_div_375k", "clk_osc0_div_750k", 1, 2), FFACT(0, "xin_osc0_half", "xin24m", 1, 2), 
MUX(USB480M, "usb480m", mux_usb480m_p, 0, -16, 14, 2), /* Clocks */ /* CRU_CLKSEL_CON00 */ /* 0:4 clk_core0_div DIV */ /* 5 Reserved */ /* 6 clk_core_i_sel MUX */ /* 7 clk_core_ndft_sel MUX */ /* 8:12 clk_core1_div DIV */ /* 13:14 Reserved */ /* 15 clk_core_ndft_mux_sel MUX */ /* CRU_CLKSEL_CON01 */ /* 0:4 clk_core2_div DIV */ /* 5:7 Reserved */ /* 8:12 clk_core3_div DIV */ /* 13:15 Reserved */ /* CRU_CLKSEL_CON02 */ COMP(0, "sclk_core_src_c", apll_gpll_npll_p, 0, 2, 0, 4, 8, 2), /* 4:7 Reserved */ /* 10:14 Reserved */ MUX(0, "sclk_core_pre_sel", sclk_core_pre_p, 0, 2, 15, 1), /* CRU_CLKSEL_CON03 */ CDIV(0, "atclk_core_div", "armclk", 0, 3, 0, 5), /* 5:7 Reserved */ CDIV(0, "gicclk_core_div", "armclk", 0, 3, 8, 5), /* 13:15 Reserved */ /* CRU_CLKSEL_CON04 */ CDIV(0, "pclk_core_pre_div", "armclk", 0, 4, 0, 5), /* 5:7 Reserved */ CDIV(0, "periphclk_core_pre_div", "armclk", 0, 4, 8, 5), /* 13:15 Reserved */ /* CRU_CLKSEL_CON05 */ /* 0:7 Reserved */ /* 8:12 aclk_core_ndft_div DIV */ /* 13 Reserved */ /* 14:15 aclk_core_biu2bus_sel MUX */ /* CRU_CLKSEL_CON06 */ COMP(0, "clk_gpu_pre_c", mpll_gpll_cpll_npll_p, 0, 6, 0, 4, 6, 2), /* 4:5 Reserved */ CDIV(0, "aclk_gpu_pre_div", "clk_gpu_pre_c", 0, 6, 8, 2), /* 10 Reserved */ MUX(CLK_GPU_PRE_MUX, "clk_gpu_pre_mux_sel", clk_gpu_pre_mux_p, 0, 6, 11, 1), CDIV(0, "pclk_gpu_pre_div", "clk_gpu_pre_c", 0, 6, 12, 4), /* CRU_CLKSEL_CON07 */ COMP(0, "clk_npu_src_c", npll_gpll_p, 0, 7, 0, 4, 6, 1), COMP(0, "clk_npu_np5_c", npll_gpll_p, 0, 7, 4, 2, 7, 1), MUX(CLK_NPU_PRE_NDFT, "clk_npu_pre_ndft", clk_npu_pre_ndft_p, 0, 7, 8, 1), /* 9:14 Reserved */ MUX(CLK_NPU, "clk_npu", clk_npu_p, 0, 7, 15, 1), /* CRU_CLKSEL_CON08 */ CDIV(0, "hclk_npu_pre_div", "clk_npu", 0, 8, 0, 4), CDIV(0, "pclk_npu_pre_div", "clk_npu", 0, 8, 4, 4), /* 8:15 Reserved */ /* CRU_CLKSEL_CON09 */ COMP(0, "clk_ddrphy1x_src_c", dpll_gpll_cpll_p, 0, 9, 0, 5, 6, 2), /* 5 Reserved */ /* 8:14 Reserved */ MUX(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, RK_CLK_COMPOSITE_GRF, 9, 15, 1), 
/* CRU_CLKSEL_CON10 */
CDIV(0, "clk_msch_div", "clk_ddr1x", 0, 10, 0, 2),
MUX(0, "aclk_perimid_sel", gpll300_gpll200_gpll100_xin24m_p, 0, 10, 4, 2),
MUX(0, "hclk_perimid_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 10, 6, 2),
MUX(0, "aclk_gic_audio_sel", gpll200_gpll150_gpll100_xin24m_p, 0, 10, 8, 2),
MUX(0, "hclk_gic_audio_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 10, 10, 2),
MUX(0, "dclk_sdmmc_buffer_sel", gpll100_gpll75_gpll50_p, 0, 10, 12, 2),
/* 14:15 Reserved */
/* CRU_CLKSEL_CON11 */
COMP(0, "clk_i2s0_8ch_tx_src_c", gpll_cpll_npll_p, 0, 11, 0, 7, 8, 2),
/* 7 Reserved */
MUX(CLK_I2S0_8CH_TX, "clk_i2s0_8ch_tx", clk_i2s0_8ch_tx_p, 0, 11, 10, 2),
/* 12:14 Reserved */
MUX(0, "i2s0_mclkout_tx_sel", i2s0_mclkout_tx_p, 0, 11, 15, 1),
/* CRU_CLKSEL_CON12 */
FRACT(0, "clk_i2s0_8ch_tx_frac_div", "clk_i2s0_8ch_tx_src", 0, 12),
/* CRU_CLKSEL_CON13 */
COMP(0, "clk_i2s0_8ch_rx_src_c", gpll_cpll_npll_p, 0, 13, 0, 7, 8, 2),
/* 7 Reserved */
MUX(CLK_I2S0_8CH_RX, "clk_i2s0_8ch_rx", clk_i2s0_8ch_rx_p, 0, 13, 10, 2),
/* 12:14 Reserved */
MUX(0, "i2s0_mclkout_rx_sel", i2s0_mclkout_rx_p, 0, 13, 15, 1),
/* CRU_CLKSEL_CON14 */
FRACT(0, "clk_i2s0_8ch_rx_frac_div", "clk_i2s0_8ch_rx_src", 0, 14),
/* CRU_CLKSEL_CON15 */
COMP(0, "clk_i2s1_8ch_tx_src_c", gpll_cpll_npll_p, 0, 15, 0, 7, 8, 2),
/* 7 Reserved */
MUX(CLK_I2S1_8CH_TX, "clk_i2s1_8ch_tx", clk_i2s1_8ch_tx_p, 0, 15, 10, 2),
/* 12:14 Reserved */
/*
 * Fix: this mux lives in CRU_CLKSEL_CON15 bit 15, not CON11; register 11
 * was a copy-paste from the i2s0 entry above and would have programmed
 * i2s0_mclkout_tx_sel instead.  Matches the i2s1 RX pair (CON17) below
 * and the per-section register used by every other i2s mclkout mux.
 */
MUX(0, "i2s1_mclkout_tx_sel", i2s1_mclkout_tx_p, 0, 15, 15, 1),
/* CRU_CLKSEL_CON16 */
FRACT(0, "clk_i2s1_8ch_tx_frac_div", "clk_i2s1_8ch_tx_src", 0, 16),
/* CRU_CLKSEL_CON17 */
COMP(0, "clk_i2s1_8ch_rx_src_c", gpll_cpll_npll_p, 0, 17, 0, 7, 8, 2),
/* 7 Reserved */
MUX(CLK_I2S1_8CH_RX, "clk_i2s1_8ch_rx", clk_i2s1_8ch_rx_p, 0, 17, 10, 2),
/* 12:14 Reserved */
MUX(0, "i2s1_mclkout_rx_sel", i2s1_mclkout_rx_p, 0, 17, 15, 1),
/* CRU_CLKSEL_CON18 */
FRACT(0, "clk_i2s1_8ch_rx_frac_div", "clk_i2s1_8ch_rx_src", 0, 18),
/* CRU_CLKSEL_CON19 */
COMP(0, "clk_i2s2_2ch_src_c", gpll_cpll_npll_p, 0, 19, 0, 7,
8, 2), /* 7 Reserved */ MUX(CLK_I2S2_2CH, "clk_i2s2_2ch", clk_i2s2_2ch_p, 0, 19, 10, 2), /* 12:14 Reserved */ MUX(0, "i2s2_mclkout_sel", i2s2_mclkout_p, 0, 19, 15, 1), /* CRU_CLKSEL_CON20 */ FRACT(0, "clk_i2s2_2ch_frac_div", "clk_i2s2_2ch_src", 0, 20), /* CRU_CLKSEL_CON21 */ COMP(0, "clk_i2s3_2ch_tx_src_c", gpll_cpll_npll_p, 0, 21, 0, 7, 8, 2), /* 7 Reserved */ MUX(CLK_I2S3_2CH_TX, "clk_i2s3_2ch_tx", clk_i2s3_2ch_tx_p, 0, 21, 10, 2), /* 12:14 Reserved */ MUX(0, "i2s3_mclkout_tx_sel", i2s3_mclkout_tx_p, 0, 21, 15, 1), /* CRU_CLKSEL_CON22 */ FRACT(0, "clk_i2s3_2ch_tx_frac_div", "clk_i2s3_2ch_tx_src", 0, 22), /* CRU_CLKSEL_CON23 */ COMP(0, "mclk_spdif_8ch_src_c", cpll_gpll_p, 0, 23, 0, 7, 14, 1), /* 7 Reserved */ MUX(0, "mclk_pdm_sel", mclk_pdm_p, 0, 23, 8, 2), MUX(0, "clk_acdcdig_i2c_sel", clk_i2c_p, 0, 23, 10, 2), /* 12:13 Reserved */ MUX(MCLK_SPDIF_8CH, "mclk_spdif_8ch", mclk_spdif_8ch_p, 0, 23, 15, 1), /* CRU_CLKSEL_CON24 */ FRACT(0, "mclk_spdif_8ch_frac_div", "mclk_spdif_8ch_src", 0, 24), /* CRU_CLKSEL_CON25 */ COMP(0, "sclk_audpwm_src_c", gpll_cpll_p, 0, 25, 0, 5, 14, 1), /* 6:13 Reserved */ MUX(SCLK_AUDPWM, "sck_audpwm_sel", sclk_audpwm_p, 0, 25, 15, 1), /* CRU_CLKSEL_CON26 */ FRACT(0, "sclk_audpwm_frac_frac", "sclk_audpwm_src", 0, 26), /* CRU_CLKSEL_CON27 */ MUX(0, "aclk_secure_flash_sel", gpll200_gpll150_gpll100_xin24m_p, 0, 27, 0, 2), MUX(0, "hclk_secure_flash_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 27, 2, 2), MUX(0, "clk_crypto_ns_core_sel", gpll200_gpll150_gpll100_p, 0, 27, 4, 2), MUX(0, "clk_crypto_ns_pka_sel", gpll300_gpll200_gpll100_p, 0, 27, 6, 2), /* 8:15 Reserved */ /* CRU_CLKSEL_CON28 */ MUX(0, "nclk_nandc_sel", clk_nandc_p, 0, 28, 0, 2), /* 2:3 Reserved */ MUX(0, "sclk_sfc_sel", sclk_sfc_p, 0, 28, 4, 3), /* 7 Reserved */ MUX(0, "bclk_emmc_sel", gpll200_gpll150_cpll125_p, 0, 28, 8, 2), /* 10:11 Reserved */ MUX(0, "cclk_emmc_sel", cclk_emmc_p, 0, 28, 12, 3), /* 15 Reserved */ /* CRU_CLKSEL_CON29 */ MUX(0, "aclk_pipe_sel", aclk_pipe_p, 0, 29, 0, 2), 
/* 2:3 Reserved */
CDIV(0, "pclk_pipe_div", "aclk_pipe", 0, 29, 4, 4),
MUX(0, "clk_usb3otg0_suspend_sel", xin24m_32k_p, 0, 29, 8, 1),
MUX(0, "clk_usb3otg1_suspend_sel", xin24m_32k_p, 0, 29, 9, 1),
/* 10:12 Reserved */
MUX(0, "clk_xpcs_eee_sel", gpll200_cpll125_p, 0, 29, 13, 1),
/* 14:15 Reserved */
/* CRU_CLKSEL_CON30 */
MUX(0, "aclk_php_sel", gpll300_gpll200_gpll100_xin24m_p, 0, 30, 0, 2),
MUX(0, "hclk_php_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 30, 2, 2),
CDIV(0, "pclk_php_div", "aclk_php", 0, 30, 4, 4),
MUX(0, "clk_sdmmc0_sel", clk_sdmmc_p, 0, 30, 8, 3),
/* 11 Reserved */
MUX(0, "clk_sdmmc1_sel", clk_sdmmc_p, 0, 30, 12, 3),
/* 15 Reserved */
/* CRU_CLKSEL_CON31 */
MUX(SCLK_GMAC0_RX_TX, "clk_gmac0_rx_tx", mux_gmac0_rx_tx_p, 0, 31, 0, 2),
MUX(SCLK_GMAC0, "clk_gmac0", mux_gmac0_p, 0, 31, 2, 1),
MUX(SCLK_GMAC0_RMII_SPEED, "clk_gmac0_rmii_speed", mux_gmac0_rmii_speed_p,
    0, 31, 3, 1),
MUX(SCLK_GMAC0_RGMII_SPEED, "clk_gmac0_rgmii_speed", mux_gmac0_rgmii_speed_p,
    0, 31, 4, 2),
MUX(0, "clk_mac0_2top_sel", clk_mac_2top_p, 0, 31, 8, 2),
MUX(0, "clk_gmac0_ptp_ref_sel", clk_gmac_ptp_p, 0, 31, 12, 2),
MUX(0, "clk_mac0_out_sel", cpll125_cpll50_cpll25_xin24m_p, 0, 31, 14, 2),
FFACT(0, "clk_gmac0_tx_div5", "clk_gmac0", 1, 5),
FFACT(0, "clk_gmac0_tx_div50", "clk_gmac0", 1, 50),
FFACT(0, "clk_gmac0_rx_div2", "clk_gmac0", 1, 2),
FFACT(0, "clk_gmac0_rx_div20", "clk_gmac0", 1, 20),
/* CRU_CLKSEL_CON32 */
MUX(0, "aclk_usb_sel", gpll300_gpll200_gpll100_xin24m_p, 0, 32, 0, 2),
/*
 * Fix: hclk_usb_sel occupies CON32 bits 2:3.  The previous shift of 4
 * overlapped pclk_usb_div (bits 4:7) in the next entry, and CON30 above
 * uses the same aclk(0:1)/hclk(2:3)/pclk-div(4:7) layout for the php
 * block.
 */
MUX(0, "hclk_usb_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 32, 2, 2),
CDIV(0, "pclk_usb_div", "aclk_usb", 0, 32, 4, 4),
MUX(0, "clk_sdmmc2_sel", clk_sdmmc_p, 0, 32, 8, 3),
/* 11:15 Reserved */
/* CRU_CLKSEL_CON33 */
MUX(SCLK_GMAC1_RX_TX, "clk_gmac1_rx_tx", mux_gmac1_rx_tx_p, 0, 33, 0, 2),
MUX(SCLK_GMAC1, "clk_gmac1", mux_gmac1_p, 0, 33, 2, 1),
MUX(SCLK_GMAC1_RMII_SPEED, "clk_gmac1_rmii_speed", mux_gmac1_rmii_speed_p,
    0, 33, 3, 1),
MUX(SCLK_GMAC1_RGMII_SPEED, "clk_gmac1_rgmii_speed", mux_gmac1_rgmii_speed_p, 0,
33, 4, 2), /* 6:7 Reserved */ MUX(0, "clk_mac1_2top_sel", clk_mac_2top_p, 0, 33, 8, 2), MUX(0, "clk_gmac1_ptp_ref_sel", clk_gmac_ptp_p, 0, 33, 12, 2), MUX(0, "clk_mac1_out_sel", cpll125_cpll50_cpll25_xin24m_p, 0, 33, 14, 2), FFACT(0, "clk_gmac1_tx_div5", "clk_gmac1", 1, 5), FFACT(0, "clk_gmac1_tx_div50", "clk_gmac1", 1, 50), FFACT(0, "clk_gmac1_rx_div2", "clk_gmac1", 1, 2), FFACT(0, "clk_gmac1_rx_div20", "clk_gmac1", 1, 20), /* CRU_CLKSEL_CON34 */ MUX(0, "aclk_vi_sel", gpll400_gpll300_gpll200_xin24m_p, 0, 34, 0, 2), /* 2:3 Reserved */ CDIV(0, "hclk_vi_div", "aclk_vi", 0, 34, 4, 4), CDIV(0, "pclk_vi_div", "aclk_vi", 0, 34, 8, 4), /* 12:13 Reserved */ MUX(0, "dclk_vicap1_sel", cpll333_gpll300_gpll200_p, 0, 34, 14, 2), /* CRU_CLKSEL_CON35 */ COMP(0, "clk_isp_c", cpll_gpll_hpll_p, 0, 35, 0, 5, 6, 2), /* 5 Reserved */ COMP(0, "clk_cif_out_c", gpll_usb480m_xin24m_p, 0, 35, 8, 6, 14, 2), /* CRU_CLKSEL_CON36 */ COMP(0, "clk_cam0_out_c", gpll_usb480m_xin24m_p, 0, 36, 0, 6, 6, 2), COMP(0, "clk_cam1_out_c", gpll_usb480m_xin24m_p, 0, 36, 8, 6, 14, 2), /* CRU_CLKSEL_CON37 */ MUX(0, "aclk_vo_sel", gpll300_cpll250_gpll100_xin24m_p, 0, 37, 0, 2), /* 2:7 Reserved */ CDIV(0, "hclk_vo_div", "aclk_vo", 0, 37, 8, 4), CDIV(0, "pclk_vo_div", "aclk_vo", 0, 37, 12, 4), /* CRU_CLKSEL_CON38 */ COMP(0, "aclk_vop_pre_c", cpll_gpll_hpll_vpll_p, 0, 38, 0, 5, 6, 2), /* 5 Reserved */ MUX(0, "clk_edp_200m_sel", gpll200_gpll150_cpll125_p, 0, 38, 8, 2), /* 10:15 Reserved */ /* CRU_CLKSEL_CON39 */ COMP(0, "dclk_vop0_c", hpll_vpll_gpll_cpll_p, 0, 39, 0, 8, 10, 2), /* 12:15 Reserved */ /* CRU_CLKSEL_CON40 */ COMP(0, "dclk_vop1_c", hpll_vpll_gpll_cpll_p, 0, 40, 0, 8, 10, 2), /* 12:15 Reserved */ /* CRU_CLKSEL_CON41 */ COMP(0, "dclk_vop2_c", hpll_vpll_gpll_cpll_p, 0, 41, 0, 8, 10, 2), /* 12:15 Reserved */ /* CRU_CLKSEL_CON42 */ COMP(0, "aclk_vpu_pre_c", gpll_cpll_p, 0, 42, 0, 5, 7, 1), /* 5:6 Reserved */ CDIV(0, "hclk_vpu_pre_div", "aclk_vpu_pre", 0, 42, 8, 4), /* 12:15 Reserved */ /* CRU_CLKSEL_CON43 */ 
MUX(0, "aclk_rga_pre_sel", gpll300_cpll250_gpll100_xin24m_p, 0, 43, 0, 2), MUX(0, "clk_rga_core_sel", gpll300_gpll200_gpll100_p, 0, 43, 2, 2), MUX(0, "clk_iep_core_sel", gpll300_gpll200_gpll100_p, 0, 43, 4, 2), MUX(0, "dclk_ebc_sel", gpll400_cpll333_gpll200_p, 0, 43, 6, 2), CDIV(0, "hclk_rga_pre_div", "aclk_rga_pre", 0, 43, 8, 4), CDIV(0, "pclk_rga_pre_div", "aclk_rga_pre", 0, 43, 12, 4), /* CRU_CLKSEL_CON44 */ COMP(0, "aclk_rkvenc_pre_c", gpll_cpll_npll_p, 0, 44, 0, 5, 6, 2), /* 5 Reserved */ CDIV(0, "hclk_rkvenc_pre_div", "aclk_rkvenc_pre", 0, 44, 8, 4), /* 12:15 Reserved */ /* CRU_CLKSEL_CON45 */ COMP(0, "clk_rkvenc_core_c", gpll_cpll_npll_vpll_p, 0, 45, 0, 5, 14, 2), /* 5:13 Reserved */ /* CRU_CLKSEL_CON46 */ /* CRU_CLKSEL_CON47 */ COMP(0, "aclk_rkvdec_pre_c", aclk_rkvdec_pre_p, 0, 47, 0, 5, 7, 1), /* 5:6 Reserved */ CDIV(0, "hclk_rkvdec_pre_div", "aclk_rkvdec_pre", 0, 47, 8, 4), /* 12:15 Reserved */ /* CRU_CLKSEL_CON48 */ COMP(0, "clk_rkvdec_ca_c", gpll_cpll_npll_vpll_p, 0, 48, 0, 5, 6, 2), /* 5 Reserved */ /* 8:15 Reserved */ /* CRU_CLKSEL_CON49 */ COMP(0, "clk_rkvdec_hevc_ca_c", gpll_cpll_npll_vpll_p, 0, 49, 0, 5, 6, 2), /* 5 Reserved */ COMP(0, "clk_rkvdec_core_c", clk_rkvdec_core_p, 0, 49, 8, 5, 14, 2), /* 13 Reserved */ /* CRU_CLKSEL_CON50 */ MUX(0, "aclk_bus_sel", gpll200_gpll150_gpll100_xin24m_p, 0, 50, 0, 2), /* 2:3 Reserved */ MUX(0, "pclk_bus_sel", gpll100_gpll75_cpll50_xin24m_p, 0, 50, 4, 2), /* 6:15 Reserved */ /* CRU_CLKSEL_CON51 */ COMP(0, "clk_tsadc_tsen_c", xin24m_gpll100_cpll100_p, 0, 51, 0, 3, 4, 2), /* 6:7 Reserved */ CDIV(0, "clk_tsadc_div", "clk_tsadc_tsen", 0, 51, 8, 7), /* 15 Reserved */ /* CRU_CLKSEL_CON52 */ COMP(0, "clk_uart1_src_c", gpll_cpll_usb480m_p, 0, 52, 0, 7, 8, 2), /* 7 Reserved */ /* 10:11 Reserved */ MUX(0, "sclk_uart1_sel", sclk_uart1_p, 0, 52, 12, 2), /* CRU_CLKSEL_CON53 */ FRACT(0, "clk_uart1_frac_frac", "clk_uart1_src", 0, 53), /* CRU_CLKSEL_CON54 */ COMP(0, "clk_uart2_src_c", gpll_cpll_usb480m_p, 0, 54, 0, 7, 8, 2), /* 
7 Reserved */
/* 10:11 Reserved */
/*
 * Fix: sclk_uart2_sel is in CRU_CLKSEL_CON54 (same register as its src
 * divider above); register 52 was a copy-paste of the uart1 entry.
 * Every other uartN pair uses a single register (uart1 52, uart4 58,
 * uart5 60 ... uart9 68).
 */
MUX(0, "sclk_uart2_sel", sclk_uart2_p, 0, 54, 12, 2),
/* CRU_CLKSEL_CON55 */
FRACT(0, "clk_uart2_frac_frac", "clk_uart2_src", 0, 55),
/* CRU_CLKSEL_CON56 */
/*
 * Fix: clk_uart3_src lives in CRU_CLKSEL_CON56, matching the
 * sclk_uart3_sel mux below; register 54 was a copy-paste of the uart2
 * entry.
 */
COMP(0, "clk_uart3_src_c", gpll_cpll_usb480m_p, 0, 56, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart3_sel", sclk_uart3_p, 0, 56, 12, 2),
/* CRU_CLKSEL_CON57 */
FRACT(0, "clk_uart3_frac_frac", "clk_uart3_src", 0, 57),
/* CRU_CLKSEL_CON58 */
COMP(0, "clk_uart4_src_c", gpll_cpll_usb480m_p, 0, 58, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart4_sel", sclk_uart4_p, 0, 58, 12, 2),
/* CRU_CLKSEL_CON59 */
FRACT(0, "clk_uart4_frac_frac", "clk_uart4_src", 0, 59),
/* CRU_CLKSEL_CON60 */
COMP(0, "clk_uart5_src_c", gpll_cpll_usb480m_p, 0, 60, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart5_sel", sclk_uart5_p, 0, 60, 12, 2),
/* CRU_CLKSEL_CON61 */
FRACT(0, "clk_uart5_frac_frac", "clk_uart5_src", 0, 61),
/* CRU_CLKSEL_CON62 */
COMP(0, "clk_uart6_src_c", gpll_cpll_usb480m_p, 0, 62, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart6_sel", sclk_uart6_p, 0, 62, 12, 2),
/* CRU_CLKSEL_CON63 */
FRACT(0, "clk_uart6_frac_frac", "clk_uart6_src", 0, 63),
/* CRU_CLKSEL_CON64 */
COMP(0, "clk_uart7_src_c", gpll_cpll_usb480m_p, 0, 64, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart7_sel", sclk_uart7_p, 0, 64, 12, 2),
/* CRU_CLKSEL_CON65 */
FRACT(0, "clk_uart7_frac_frac", "clk_uart7_src", 0, 65),
/* CRU_CLKSEL_CON66 */
COMP(0, "clk_uart8_src_c", gpll_cpll_usb480m_p, 0, 66, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart8_sel", sclk_uart8_p, 0, 66, 12, 2),
/* CRU_CLKSEL_CON67 */
FRACT(0, "clk_uart8_frac_frac", "clk_uart8_src", 0, 67),
/* CRU_CLKSEL_CON68 */
COMP(0, "clk_uart9_src_c", gpll_cpll_usb480m_p, 0, 68, 0, 7, 8, 2),
/* 7 Reserved */
/* 10:11 Reserved */
MUX(0, "sclk_uart9_sel", sclk_uart9_p, 0, 68, 12, 2),
/* CRU_CLKSEL_CON69 */
FRACT(0, "clk_uart9_frac_frac",
"clk_uart9_src", 0, 69), /* CRU_CLKSEL_CON70 */ COMP(0, "clk_can0_c", gpll_cpll_p, 0, 70, 0, 5, 7, 1), /* 5:6 Reserved */ COMP(0, "clk_can1_c", gpll_cpll_p, 0, 70, 8, 5, 15, 1), /* 13:14 Reserved */ /* CRU_CLKSEL_CON71 */ COMP(0, "clk_can2_c", gpll_cpll_p, 0, 71, 0, 5, 7, 1), /* 5:6 Reserved */ MUX(0, "clk_i2c_sel", clk_i2c_p, 0, 71, 8, 2), /* 10:15 Reserved */ /* CRU_CLKSEL_CON72 */ MUX(0, "clk_spi0_sel", gpll200_xin24m_cpll100_p, 0, 72, 0, 2), MUX(0, "clk_spi1_sel", gpll200_xin24m_cpll100_p, 0, 72, 2, 2), MUX(0, "clk_spi2_sel", gpll200_xin24m_cpll100_p, 0, 72, 4, 2), MUX(0, "clk_spi3_sel", gpll200_xin24m_cpll100_p, 0, 72, 6, 2), MUX(0, "clk_pwm1_sel", gpll100_xin24m_cpll100_p, 0, 72, 8, 2), MUX(0, "clk_pwm2_sel", gpll100_xin24m_cpll100_p, 0, 72, 10, 2), MUX(0, "clk_pwm3_sel", gpll100_xin24m_cpll100_p, 0, 72, 12, 2), MUX(0, "dbclk_gpio_sel", xin24m_32k_p, 0, 72, 14, 1), /* 15 Reserved */ /* CRU_CLKSEL_CON73 */ MUX(0, "aclk_top_high_sel", cpll500_gpll400_gpll300_xin24m_p, 0, 73, 0, 2), /* 2:3 Reserved */ MUX(0, "aclk_top_low_sel", gpll400_gpll300_gpll200_xin24m_p, 0, 73, 4, 2), /* 6:7 Reserved */ MUX(0, "hclk_top_sel", gpll150_gpll100_gpll75_xin24m_p, 0, 73, 8, 2), /* 10:11 Reserved */ MUX(0, "pclk_top_sel", gpll100_gpll75_cpll50_xin24m_p, 0, 73, 12, 2), /* 14 Reserved */ MUX(0, "clk_optc_arb_sel", xin24m_cpll100_p, 0, 73, 15 , 1), /* CRU_CLKSEL_CON74 */ /* 0:7 clk_testout_div CDIV */ /* 8:12 clk_testout_sel MUX */ /* CRU_CLKSEL_CON75 */ CDIV(0, "clk_gpll_div_400m_div", "gpll", 0, 75, 0, 5), CDIV(0, "clk_gpll_div_300m_div", "gpll", 0, 75, 8, 5), /* CRU_CLKSEL_CON76 */ CDIV(0, "clk_gpll_div_200m_div", "gpll", 0, 76, 0, 5), CDIV(0, "clk_gpll_div_150m_div", "gpll", 0, 76, 8, 5), /* CRU_CLKSEL_CON77 */ CDIV(0, "clk_gpll_div_100m_div", "gpll", 0, 77, 0, 5), CDIV(0, "clk_gpll_div_75m_div", "gpll", 0, 77, 8, 5), /* CRU_CLKSEL_CON78 */ CDIV(0, "clk_gpll_div_20m_div", "gpll", 0, 78, 0, 6), CDIV(0, "clk_cpll_div_500m_div", "cpll", 0, 78, 8, 5), /* CRU_CLKSEL_CON79 */ CDIV(0, 
"clk_cpll_div_333m_div", "cpll", 0, 79, 0, 6), CDIV(0, "clk_cpll_div_250m_div", "cpll", 0, 79, 8, 5), /* CRU_CLKSEL_CON80 */ CDIV(0, "clk_cpll_div_125m_div", "cpll", 0, 80, 0, 6), CDIV(0, "clk_cpll_div_62P5m_div", "cpll", 0, 80, 8, 5), /* CRU_CLKSEL_CON81 */ CDIV(0, "clk_cpll_div_50m_div", "cpll", 0, 81, 0, 6), CDIV(0, "clk_cpll_div_25m_div", "cpll", 0, 81, 8, 5), /* CRU_CLKSEL_CON82 */ CDIV(0, "clk_cpll_div_100m_div", "cpll", 0, 82, 0, 6), CDIV(0, "clk_osc0_div_750k_div", "xin24m", 0, 82, 8, 5), /* CRU_CLKSEL_CON83 */ CDIV(0, "clk_i2s3_2ch_rx_src_div", "clk_i2s3_2ch_rx_src_sel", 0, 83, 0, 7), /* 7 Reserved */ MUX(0, "clk_i2s3_2ch_rx_src_sel", gpll_cpll_npll_p, 0, 83, 8, 2), MUX(CLK_I2S3_2CH_RX, "clk_i2s3_2ch_rx", clk_i2s3_2ch_rx_p, 0, 83, 10, 2), /* 12:14 Reserved */ MUX(0, "i2s3_mclkout_rx_sel", i2s3_mclkout_rx_p, 0, 83, 15, 1), /* CRU_CLKSEL_CON84 */ FRACT(0, "clk_i2s3_2ch_rx_frac_div", "clk_i2s3_2ch_rx_src", 0, 84), }; /* GATES */ static struct rk_cru_gate rk3568_gates[] = { /* CRU_CLKGATE_CON00 */ /* 0 clk_core */ /* 1 clk_core0 */ /* 2 clk_core1 */ /* 3 clk_core2 */ /* 4 clk_core3 */ GATE(0, "sclk_core_src", "sclk_core_src_c", 0, 5), /* 6 clk_npll_core */ /* 7 sclk_core */ GATE(0, "atclk_core", "atclk_core_div", 0, 8), GATE(0, "gicclk_core", "gicclk_core_div", 0, 9), GATE(0, "pclk_core_pre", "pclk_core_pre_div", 0, 10), GATE(0, "periphclk_core_pre", "periphclk_core_pre_div", 0, 11), /* 12 pclk_core */ /* 13 periphclk_core */ /* 14 tsclk_core */ /* 15 cntclk_core */ /* CRU_CLKGATE_CON01 */ /* 0 aclk_core */ /* 1 aclk_core_biuddr */ /* 2 aclk_core_biu2bus */ /* 3 pclk_dgb_biu */ /* 4 pclk_dbg */ /* 5 pclk_dbg_daplite */ /* 6 aclk_adb400_core2gic */ /* 7 aclk_adb400_gic2core */ /* 8 pclk_core_grf */ GATE(PCLK_CORE_PVTM, "pclk_core_pvtm", "pclk_core_pre", 1, 9), GATE(CLK_CORE_PVTM, "clk_core_pvtm", "xin24m", 1, 10), GATE(CLK_CORE_PVTM_CORE, "clk_core_pvtm_core", "armclk", 1, 11), GATE(CLK_CORE_PVTPLL, "clk_core_pvtpll", "armclk", 1, 12), /* 13 clk_core_div2 */ /* 
14 clk_apll_core */ /* 15 clk_jtag */ /* CRU_CLKGATE_CON02 */ /* 0 clk_gpu_src */ GATE(CLK_GPU_SRC, "clk_gpu_src", "clk_gpu_pre_c", 2, 0), /* 1 Reserved */ GATE(PCLK_GPU_PRE, "pclk_gpu_pre", "pclk_gpu_pre_div", 2, 2), GATE(CLK_GPU, "clk_gpu", "clk_gpu_pre_c", 2, 3), /* 4 aclk_gpu_biu */ /* 5 pclk_gpu_biu */ GATE(PCLK_GPU_PVTM, "pclk_gpu_pvtm", "pclk_gpu_pre", 2, 6), GATE(CLK_GPU_PVTM, "clk_gpu_pvtm", "xin24m", 2, 7), GATE(CLK_GPU_PVTM_CORE, "clk_gpu_pvtm_core", "clk_gpu_src", 2, 8), GATE(CLK_GPU_PVTPLL, "clk_gpu_pvtpll", "clk_gpu_src", 2, 9), /* 10 clk_gpu_div2 */ GATE(ACLK_GPU_PRE, "aclk_gpu_pre", "aclk_gpu_pre_div", 2, 11), /* 12:15 Reserved */ /* CRU_CLKGATE_CON03 */ GATE(CLK_NPU_SRC, "clk_npu_src", "clk_npu_src_c", 3, 0), GATE(CLK_NPU_NP5, "clk_npu_np5", "clk_npu_np5_c", 3, 1), GATE(HCLK_NPU_PRE, "hclk_npu_pre", "hclk_npu_pre_div", 3, 2), GATE(PCLK_NPU_PRE, "pclk_npu_pre", "pclk_npu_pre_div", 3, 3), /* 4 aclk_npu_biu */ GATE(ACLK_NPU_PRE, "aclk_npu_pre", "clk_npu", 3, 4), /* 5 hclk_npu_biu */ /* 6 pclk_npu_biu */ GATE(ACLK_NPU, "aclk_npu", "aclk_npu_pre", 3, 7), GATE(HCLK_NPU, "hclk_npu", "hclk_npu_pre", 3, 8), GATE(PCLK_NPU_PVTM, "pclk_npu_pvtm", "pclk_npu_pre", 3, 9), GATE(CLK_NPU_PVTM, "clk_npu_pvtm", "xin24m", 3, 10), GATE(CLK_NPU_PVTM_CORE, "clk_npu_pvtm_core", "clk_npu_pre_ndft",3, 11), GATE(CLK_NPU_PVTPLL, "clk_npu_pvtpll", "clk_npu_pre_ndft", 3, 12), /* 13 clk_npu_div2 */ /* 14:15 Reserved */ /* CRU_CLKGATE_CON04 */ GATE(CLK_DDRPHY1X_SRC, "clk_ddrphy1x_src", "clk_ddrphy1x_src_c", 4, 0), /* 1 clk_dpll_ddr */ GATE(CLK_MSCH, "clk_msch", "clk_msch_div", 4, 2), /* 3 clk_hwffc_ctrl */ /* 4 aclk_ddrscramble */ /* 5 aclk_msch */ /* 6 clk_ddr_alwayson */ /* 7 Reserved */ /* 8 aclk_ddrsplit */ /* 9 clk_ddrdft_ctl */ /* 10 Reserved */ /* 11 aclk_dma2ddr */ /* 12 Reserved */ /* 13 clk_ddrmon */ /* 14 Reserved */ GATE(CLK24_DDRMON, "clk24_ddrmon", "xin24m", 4, 15), /* CRU_CLKGATE_CON05 */ GATE(ACLK_GIC_AUDIO, "aclk_gic_audio", "aclk_gic_audio_sel", 5, 0), 
GATE(HCLK_GIC_AUDIO, "hclk_gic_audio", "hclk_gic_audio_sel", 5, 1), /* 2 aclk_gic_audio_biu */ /* 3 hclk_gic_audio_biu */ GATE(ACLK_GIC600, "aclk_gic600", "aclk_gic_audio", 5, 4), /* 5 aclk_gicadb_core2gic */ /* 6 aclk_gicadb_gic2core */ GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_gic_audio", 5, 7), GATE(HCLK_SDMMC_BUFFER, "hclk_sdmmc_buffer", "hclk_gic_audio", 5, 8), GATE(DCLK_SDMMC_BUFFER, "dclk_sdmmc_buffer", "dclk_sdmmc_buffer_sel", 5, 9), GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_gic_audio", 5, 10), GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_gic_audio", 5, 11), GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_gic_audio", 5, 12), GATE(HCLK_I2S3_2CH, "hclk_i2s3_2ch", "hclk_gic_audio", 5, 13), GATE(HCLK_PDM, "hclk_pdm", "hclk_gic_audio", 5, 14), GATE(MCLK_PDM, "mclk_pdm", "mclk_pdm_sel", 5, 15), /* CRU_CLKGATE_CON06 */ GATE(CLK_I2S0_8CH_TX_SRC, "clk_i2s0_8ch_tx_src", "clk_i2s0_8ch_tx_src_c", 6, 0), GATE(CLK_I2S0_8CH_TX_FRAC, "clk_i2s0_8ch_tx_frac", "clk_i2s0_8ch_tx_frac_div", 6, 1), GATE(MCLK_I2S0_8CH_TX, "mclk_i2s0_8ch_tx", "clk_i2s0_8ch_tx", 6, 2), GATE(I2S0_MCLKOUT_TX, "i2s0_mclkout_tx", "i2s0_mclkout_tx_sel", 6, 3), GATE(CLK_I2S0_8CH_RX_SRC, "clk_i2s0_8ch_rx_src", "clk_i2s0_8ch_rx_src_c", 6, 4), GATE(CLK_I2S0_8CH_RX_FRAC, "clk_i2s0_8ch_rx_frac", "clk_i2s0_8ch_rx_frac_div", 6, 5), GATE(MCLK_I2S0_8CH_RX, "mclk_i2s0_8ch_rx", "clk_i2s0_8ch_rx", 6, 6), GATE(I2S0_MCLKOUT_RX, "i2s0_mclkout_rx", "i2s0_mclkout_rx_sel", 6, 7), GATE(CLK_I2S1_8CH_TX_SRC, "clk_i2s1_8ch_tx_src", "clk_i2s1_8ch_tx_src_c", 6, 8), GATE(CLK_I2S1_8CH_TX_FRAC, "clk_i2s1_8ch_tx_frac", "clk_i2s1_8ch_tx_frac_div", 6, 9), GATE(MCLK_I2S1_8CH_TX, "mclk_i2s1_8ch_tx", "clk_i2s1_8ch_tx", 6, 10), GATE(I2S1_MCLKOUT_TX, "i2s1_mclkout_tx", "i2s1_mclkout_tx_sel", 6, 11), GATE(CLK_I2S1_8CH_RX_SRC, "clk_i2s1_8ch_rx_src", "clk_i2s1_8ch_rx_src_c", 6, 12), GATE(CLK_I2S1_8CH_RX_FRAC, "clk_i2s1_8ch_rx_frac", "clk_i2s1_8ch_rx_frac_div", 6, 13), GATE(MCLK_I2S1_8CH_RX, "mclk_i2s1_8ch_rx", "clk_i2s1_8ch_rx", 6, 14), 
GATE(I2S1_MCLKOUT_RX, "i2s1_mclkout_rx", "i2s1_mclkout_rx_sel", 6, 15), /* CRU_CLKGATE_CON07 */ GATE(CLK_I2S2_2CH_SRC, "clk_i2s2_2ch_src", "clk_i2s2_2ch_src_c", 7, 0), GATE(CLK_I2S2_2CH_FRAC, "clk_i2s2_2ch_frac", "clk_i2s2_2ch_frac_div", 7, 1), GATE(MCLK_I2S2_2CH, "mclk_i2s2_2ch", "clk_i2s2_2ch", 7, 2), GATE(I2S2_MCLKOUT, "i2s2_mclkout", "i2s2_mclkout_sel", 7, 3), GATE(CLK_I2S3_2CH_TX, "clk_i2s3_2ch_tx_src", "clk_i2s3_2ch_tx_src_c", 7, 4), GATE(CLK_I2S3_2CH_TX_FRAC, "clk_i2s3_2ch_tx_frac", "clk_i2s3_2ch_tx_frac_div", 7, 5), GATE(MCLK_I2S3_2CH_TX, "mclk_i2s3_2ch_tx", "clk_i2s3_2ch_tx", 7, 6), GATE(I2S3_MCLKOUT_TX, "i2s3_mclkout_tx", "i2s3_mclkout_tx_sel", 7, 7), GATE(CLK_I2S3_2CH_RX, "clk_i2s3_2ch_rx_src", "clk_i2s3_2ch_rx_src_div", 7, 8), GATE(CLK_I2S3_2CH_RX_FRAC, "clk_i2s3_2ch_rx_frac", "clk_i2s3_2ch_rx_frac_div", 7, 9), GATE(MCLK_I2S3_2CH_RX, "mclk_i2s3_2ch_rx", "clk_i2s3_2ch_rx", 7, 10), GATE(I2S3_MCLKOUT_RX, "i2s3_mclkout_rx", "i2s3_mclkout_rx_sel", 7, 11), GATE(HCLK_VAD, "hclk_vad", "hclk_gic_audio", 7, 12), GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_gic_audio", 7, 13), GATE(MCLK_SPDIF_8CH_SRC, "mclk_spdif_8ch_src", "mclk_spdif_8ch_src_c", 7, 14), GATE(MCLK_SPDIF_8CH_FRAC, "mclk_spdif_8ch_frac", "mclk_spdif_8ch_frac_div", 7, 15), /* CRU_CLKGATE_CON08 */ GATE(HCLK_AUDPWM, "hclk_audpwm", "hclk_gic_audio", 8, 0), GATE(SCLK_AUDPWM_SRC, "sclk_audpwm_src", "sclk_audpwm_src_c", 8, 1), GATE(SCLK_AUDPWM_FRAC, "sclk_audpwm_frac", "sclk_audpwm_frac_frac", 8, 2), GATE(HCLK_ACDCDIG, "hclk_acdcdig", "hclk_gic_audio", 8, 3), GATE(CLK_ACDCDIG_I2C, "clk_acdcdig_i2c", "clk_acdcdig_i2c_sel", 8, 4), GATE(CLK_ACDCDIG_DAC, "clk_acdcdig_dac", "mclk_i2s3_2ch_tx", 8, 5), GATE(CLK_ACDCDIG_ADC, "clk_acdcdig_adc", "mclk_i2s3_2ch_rx", 8, 6), GATE(ACLK_SECURE_FLASH, "aclk_secure_flash", "aclk_secure_flash_sel", 8, 7), GATE(HCLK_SECURE_FLASH, "hclk_secure_flash", "hclk_secure_flash_sel", 8, 8), /* 9 aclk_secure_flash_biu */ /* 10 hclk_secure_flash_biu */ GATE(ACLK_CRYPTO_NS, 
"aclk_crypto_ns", "aclk_secure_flash", 8, 11), GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_secure_flash", 8, 12), GATE(CLK_CRYPTO_NS_CORE, "clk_crypto_ns_core", "clk_crypto_ns_core_sel", 8, 13), GATE(CLK_CRYPTO_NS_PKA, "clk_crypto_ns_pka", "clk_crypto_ns_pka_sel", 8, 14), GATE(CLK_CRYPTO_NS_RNG, "clk_crypto_ns_rng", "hclk_secure_flash", 8, 15), /* CRU_CLKGATE_CON09 */ GATE(HCLK_NANDC, "hclk_nandc", "hclk_secure_flash", 9, 0), GATE(NCLK_NANDC, "nclk_nandc", "nclk_nandc_sel", 9, 1), GATE(HCLK_SFC, "hclk_sfc", "hclk_secure_flash", 9, 2), GATE(HCLK_SFC_XIP, "hclk_sfc_xip", "hclk_secure_flash", 9, 3), GATE(SCLK_SFC, "sclk_sfc", "sclk_sfc_sel", 9, 4), GATE(ACLK_EMMC, "aclk_emmc", "aclk_secure_flash", 9, 5), GATE(HCLK_EMMC, "hclk_emmc", "hclk_secure_flash", 9, 6), GATE(BCLK_EMMC, "bclk_emmc", "bclk_emmc_sel", 9, 7), GATE(CCLK_EMMC, "cclk_emmc", "cclk_emmc_sel", 9, 8), GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 9, 9), GATE(HCLK_TRNG_NS, "hclk_trng_ns", "hclk_secure_flash", 9, 10), GATE(CLK_TRNG_NS, "clk_trng_ns", "hclk_secure_flash", 9, 11), /* 12:15 Reserved */ /* CRU_CLKGATE_CON10 */ GATE(ACLK_PIPE, "aclk_pipe", "aclk_pipe_sel", 10, 0), GATE(PCLK_PIPE, "pclk_pipe", "pclk_pipe_div", 10, 1), /* 2 aclk_pipe_biu */ /* 3 pclk_pipe_biu */ GATE(CLK_XPCS_EEE, "clk_xpcs_eee", "clk_xpcs_eee_sel", 10, 4), /* 5 clk_xpcs_rx_div10 */ /* 6 clk_xpcs_tx_div10 */ /* 7 pclk_pipe_grf */ GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_pipe", 10, 8), GATE(CLK_USB3OTG0_REF, "clk_usb3otg0_ref", "xin24m", 10, 9), GATE(CLK_USB3OTG0_SUSPEND, "clk_usb3otg0_suspend", "clk_usb3otg0_suspend_sel", 10, 10), /* 11 clk_usb3otg0_pipe */ GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_pipe", 10, 12), GATE(CLK_USB3OTG1_REF, "clk_usb3otg1_ref", "xin24m", 10, 13), GATE(CLK_USB3OTG1_SUSPEND, "clk_usb3otg1_suspend", "clk_usb3otg1_suspend_sel", 10, 14), /* 15 clk_usb3otg1_pipe */ /* CRU_CLKGATE_CON11 */ GATE(ACLK_SATA0, "aclk_sata0", "aclk_pipe", 11, 0), GATE(CLK_SATA0_PMALIVE, "clk_sata0_pmalive", "clk_gpll_div_20m", 11, 1), 
GATE(CLK_SATA0_RXOOB, "clk_sata0_rxoob", "clk_cpll_div_50m", 11, 2), /* 3 clk_sata0_pipe */ GATE(ACLK_SATA1, "aclk_sata1", "aclk_pipe", 11, 4), GATE(CLK_SATA1_PMALIVE, "clk_sata1_pmalive", "clk_gpll_div_20m", 11, 5), GATE(CLK_SATA1_RXOOB, "clk_sata1_rxoob", "clk_cpll_div_50m", 11, 6), /* 7 clk_sata1_pipe */ GATE(ACLK_SATA2, "aclk_sata2", "aclk_pipe", 11, 8), GATE(CLK_SATA2_PMALIVE, "clk_sata2_pmalive", "clk_gpll_div_20m", 11, 9), GATE(CLK_SATA2_RXOOB, "clk_sata2_rxoob", "clk_cpll_div_50m", 11, 10), /* 11 clk_sata2_pipe */ /* 12:15 Reserved */ /* CRU_CLKGATE_CON12 */ GATE(ACLK_PCIE20_MST, "aclk_pcie20_mst", "aclk_pipe", 12, 0), GATE(ACLK_PCIE20_SLV, "aclk_pcie20_slv", "aclk_pipe", 12, 1), GATE(ACLK_PCIE20_DBI, "aclk_pcie20_dbi", "aclk_pipe", 12, 2), GATE(PCLK_PCIE20, "pclk_pcie20", "pclk_pipe", 12, 3), GATE(CLK_PCIE20_AUX_NDFT, "clk_pcie20_aux_ndft", "xin24m", 12, 4), /* 5 clk_pcie20_pipe */ /* 6:7 Reserved */ GATE(ACLK_PCIE30X1_MST, "aclk_pcie30x1_mst", "aclk_pipe", 12, 8), GATE(ACLK_PCIE30X1_SLV, "aclk_pcie30x1_slv", "aclk_pipe", 12, 9), GATE(ACLK_PCIE30X1_DBI, "aclk_pcie30x1_dbi", "aclk_pipe", 12, 10), GATE(PCLK_PCIE30X1, "pclk_pcie30x1", "pclk_pipe", 12, 11), GATE(CLK_PCIE30X1_AUX_NDFT, "clk_pcie30x1_aux_ndft", "xin24m", 12, 12), /* 13 clk_pcie30x1_pipe */ /* 14:15 Reserved */ /* CRU_CLKGATE_CON13 */ GATE(ACLK_PCIE30X2_MST, "aclk_pcie30x2_mst", "aclk_pipe", 13, 0), GATE(ACLK_PCIE30X2_SLV, "aclk_pcie30x2_slv", "aclk_pipe", 13, 1), GATE(ACLK_PCIE30X2_DBI, "aclk_pcie30x2_dbi", "aclk_pipe", 13, 2), GATE(PCLK_PCIE30X2, "pclk_pcie30x2", "pclk_pipe", 13, 3), GATE(CLK_PCIE30X2_AUX_NDFT, "clk_pcie30x2_aux_ndft", "xin24m", 13, 4), /* 5 clk_pcie30x2_pipe */ GATE(PCLK_XPCS, "pclk_xpcs", "pclk_pipe", 13, 6), /* 7 clk_xpcs_qsgmii_tx */ /* 8 clk_xpcs_qsgmii_rx */ /* 9 clk_xpcs_xgxs_tx */ /* 10 Reserved */ /* 11 clk_xpcs_xgxs_rx */ /* 12 clk_xpcs_mii0_tx */ /* 13 clk_xpcs_mii0_rx */ /* 14 clk_xpcs_mii1_tx */ /* 15 clk_xpcs_mii1_rx */ /* CRU_CLKGATE_CON14 */ GATE(ACLK_PERIMID, 
"aclk_perimid", "aclk_perimid_sel", 14, 0), GATE(HCLK_PERIMID, "hclk_perimid", "hclk_perimid_sel", 14, 1), /* 2 aclk_perimid_biu */ /* 3 hclk_perimid_biu */ /* 4:7 Reserved */ GATE(ACLK_PHP, "aclk_php", "aclk_php_sel", 14, 8), GATE(HCLK_PHP, "hclk_php", "hclk_php_sel", 14, 9), GATE(PCLK_PHP, "pclk_php", "pclk_php_div", 14, 10), /* 11 aclk_php_biu */ /* 12 hclk_php_biu */ /* 13 pclk_php_biu */ /* 14:15 Reserved */ /* CRU_CLKGATE_CON15 */ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_php", 15, 0), GATE(CLK_SDMMC0, "clk_sdmmc0", "clk_sdmmc0_sel", 15, 1), GATE(HCLK_SDMMC1, "hclk_sdmmc1", "hclk_php", 15, 2), GATE(CLK_SDMMC1, "clk_sdmmc1", "clk_sdmmc1_sel", 15, 3), GATE(CLK_GMAC0_PTP_REF, "clk_gmac0_ptp_ref", "clk_gmac0_ptp_ref_sel", 15, 4), GATE(ACLK_GMAC0, "aclk_gmac0", "aclk_php", 15, 5), GATE(PCLK_GMAC0, "pclk_gmac0", "pclk_php", 15, 6), GATE(CLK_MAC0_2TOP, "clk_mac0_2top", "clk_mac0_2top_sel", 15, 7), GATE(CLK_MAC0_OUT, "clk_mac0_out", "clk_mac0_out_sel", 15, 8), /* 9:11 Reserved */ GATE(CLK_MAC0_REFOUT, "clk_mac0_refout", "clk_mac0_2top", 15, 12), /* 13:15 Reserved */ /* CRU_CLKGATE_CON16 */ GATE(ACLK_USB, "aclk_usb", "aclk_usb_sel", 16, 0), GATE(HCLK_USB, "hclk_usb", "hclk_usb_sel", 16, 1), GATE(PCLK_USB, "pclk_usb", "pclk_usb_div", 16, 2), /* 3 aclk_usb_biu */ /* 4 hclk_usb_biu */ /* 5 pclk_usb_biu */ /* 6 pclk_usb_grf */ /* 7:11 Reserved */ GATE(HCLK_USB2HOST0, "hclk_usb2host0", "hclk_usb", 16, 12), GATE(HCLK_USB2HOST0_ARB, "hclk_usb2host0_arb", "hclk_usb", 16, 13), GATE(HCLK_USB2HOST1, "hclk_usb2host1", "hclk_usb", 16, 14), GATE(HCLK_USB2HOST1_ARB, "hclk_usb2host1_arb", "hclk_usb", 16, 15), /* CRU_CLKGATE_CON17 */ GATE(HCLK_SDMMC2, "hclk_sdmmc2", "hclk_usb", 17, 0), GATE(CLK_SDMMC2, "clk_sdmmc2", "clk_sdmmc2_sel", 17, 1), GATE(CLK_GMAC1_PTP_REF, "clK_gmac1_ptp_ref", "clk_gmac1_ptp_ref_sel", 17, 2), GATE(ACLK_GMAC1, "aclk_gmac1", "aclk_usb", 17, 3), GATE(PCLK_GMAC1, "pclk_gmac1", "pclk_usb", 17, 4), GATE(CLK_MAC1_2TOP, "clk_mac1_2top", "clk_mac1_2top_sel", 17, 5), 
GATE(CLK_MAC1_OUT, "clk_mac1_out", "clk_mac1_out_sel", 17, 6),
	/* 7:9 Reserved */
	GATE(CLK_MAC1_REFOUT, "clk_mac1_refout", "clk_mac1_2top", 17, 10),
	/* 11:15 Reserved */

	/* CRU_CLKGATE_CON18 */
	GATE(ACLK_VI, "aclk_vi", "aclk_vi_sel", 18, 0),
	GATE(HCLK_VI, "hclk_vi", "hclk_vi_div", 18, 1),
	GATE(PCLK_VI, "pclk_vi", "pclk_vi_div", 18, 2),
	/* 3 aclk_vi_biu */
	/* 4 hclk_vi_biu */
	/* 5 pclk_vi_biu */
	/* 6:8 Reserved */
	GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi", 18, 9),
	GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi", 18, 10),
	GATE(DCLK_VICAP, "dclk_vicap", "dclk_vicap1_sel", 18, 11),
	/* 12:15 Reserved */

	/* CRU_CLKGATE_CON19 */
	GATE(ACLK_ISP, "aclk_isp", "aclk_vi", 19, 0),
	GATE(HCLK_ISP, "hclk_isp", "hclk_vi", 19, 1),
	GATE(CLK_ISP, "clk_isp", "clk_isp_c", 19, 2),
	/* 3 Reserved */
	GATE(PCLK_CSI2HOST1, "pclk_csi2host1", "pclk_vi", 19, 4),
	/* 5:7 Reserved */
	GATE(CLK_CIF_OUT, "clk_cif_out", "clk_cif_out_c", 19, 8),
	GATE(CLK_CAM0_OUT, "clk_cam0_out", "clk_cam0_out_c", 19, 9),
	/*
	 * Fixed: gate bit was 9, duplicating CLK_CAM0_OUT above;
	 * clk_cam1_out gates on CON19 bit 10 (consistent with the
	 * "11:15 Reserved" note below and the Linux rk3568 clock driver).
	 */
	GATE(CLK_CAM1_OUT, "clk_cam1_out", "clk_cam1_out_c", 19, 10),
	/* 11:15 Reserved */

	/* CRU_CLKGATE_CON20 */
	/* 0 Reserved or aclk_vo ???
*/ GATE(ACLK_VO, "aclk_vo", "aclk_vo_sel", 20, 0), GATE(HCLK_VO, "hclk_vo", "hclk_vo_div", 20, 1), GATE(PCLK_VO, "pclk_vo", "pclk_vo_div", 20, 2), /* 3 aclk_vo_biu */ /* 4 hclk_vo_biu */ /* 5 pclk_vo_biu */ GATE(ACLK_VOP_PRE, "aclk_vop_pre", "aclk_vop_pre_c", 20, 6), /* 7 aclk_vop_biu */ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 20, 8), GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 20, 9), GATE(DCLK_VOP0, "dclk_vop0", "dclk_vop0_c", 20, 10), GATE(DCLK_VOP1, "dclk_vop1", "dclk_vop1_c", 20, 11), GATE(DCLK_VOP2, "dclk_vop2", "dclk_vop2_c", 20, 12), GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 20, 13), /* 14:15 Reserved */ /* CRU_CLKGATE_CON21 */ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vo", 21, 0), GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vo", 21, 1), GATE(PCLK_HDCP, "pclk_hdcp", "pclk_vo", 21, 2), GATE(PCLK_HDMI_HOST, "pclk_hdmi_host", "pclk_vo", 21, 3), GATE(CLK_HDMI_SFR, "clk_hdmi_sfr", "xin24m", 21, 4), GATE(CLK_HDMI_CEC, "clk_hdmi_cec", "clk_rtc_32k", 21, 5), GATE(PCLK_DSITX_0, "pclk_dsitx_0", "pclk_vo", 21, 6), GATE(PCLK_DSITX_1, "pclk_dsitx_1", "pclk_vo", 21, 7), GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "pclk_vo", 21, 8), GATE(CLK_EDP_200M, "clk_edp_200m", "clk_edp_200m_sel", 21, 9), /* 10:15 Reserved */ /* CRU_CLKGATE_CON22 */ GATE(ACLK_VPU_PRE, "aclk_vpu_pre", "aclk_vpu_pre_c", 22, 0), GATE(HCLK_VPU_PRE, "hclk_vpu_pre", "aclk_vpu_pre_c", 22, 1), /* 2 aclk_vpu_biu */ /* 3 hclk_vpu_biu */ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 22, 4), GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 22, 5), /* 6:11 Reserved */ GATE(PCLK_RGA_PRE, "pclk_rga_pre", "pclk_rga_pre_div", 22, 12), /* 13 pclk_rga_biu */ GATE(PCLK_EINK, "pclk_eink", "pclk_rga_pre", 22, 14), GATE(HCLK_EINK, "hclk_eink", "hclk_rga_pre", 22, 15), /* CRU_CLKGATE_CON23 */ GATE(ACLK_RGA_PRE, "aclk_rga_pre", "aclk_rga_pre_sel", 23, 0), GATE(HCLK_RGA_PRE, "hclk_rga_pre", "hclk_rga_pre_div", 23, 1), /* 2 aclk_rga_biu */ /* 3 hclk_rga_biu */ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 23, 4), GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 23, 
5), GATE(CLK_RGA_CORE, "clk_rga_core", "clk_rga_core_sel", 23, 6), GATE(ACLK_IEP, "aclk_iep", "aclk_rga_pre", 23, 7), GATE(HCLK_IEP, "hclk_iep", "hclk_rga_pre", 23, 8), GATE(CLK_IEP_CORE, "clk_iep_core", "clk_iep_core_sel", 23, 9), GATE(HCLK_EBC, "hclk_ebc", "hclk_rga_pre", 23, 10), GATE(DCLK_EBC, "dclk_ebc", "dclk_ebc_sel", 23, 11), GATE(ACLK_JDEC, "aclk_jdec", "aclk_rga_pre", 23, 12), GATE(HCLK_JDEC, "hclk_jdec", "hclk_rga_pre", 23, 13), GATE(ACLK_JENC, "aclk_jenc", "aclk_rga_pre", 23, 14), GATE(HCLK_JENC, "hclk_jenc", "hclk_rga_pre", 23, 15), /* CRU_CLKGATE_CON24 */ GATE(ACLK_RKVENC_PRE, "aclk_rkvenc_pre", "aclk_rkvenc_pre_c", 24, 0), GATE(HCLK_RKVENC_PRE, "hclk_rkvenc_pre", "hclk_rkvenc_pre_div", 24, 1), /* 2 Reserved */ /* 3 aclk_rkvenc_biu */ /* 4 hclk_rkvenc_biu */ /* 5 Reserved */ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_pre", 24, 6), GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_rkvenc_pre", 24, 7), GATE(CLK_RKVENC_CORE, "clk_rkvenc_core", "clk_rkvenc_core_c", 24, 8), /* 9:15 Reserved */ /* CRU_CLKGATE_CON25 */ GATE(ACLK_RKVDEC_PRE, "aclk_rkvdec_pre", "aclk_rkvdec_pre_c", 25, 0), GATE(HCLK_RKVDEC_PRE, "hclk_rkvdec_pre", "hclk_rkvdec_pre_div", 25, 1), /* 2 aclk_rkvdec_biu */ /* 3 hclk_rkvdec_biu */ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", 25, 4), GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", 25, 5), GATE(CLK_RKVDEC_CA, "clk_rkvdec_ca", "clk_rkvdec_ca_c", 25, 6), GATE(CLK_RKVDEC_CORE, "clk_rkvdec_core", "clk_rkvdec_core_c", 25, 7), GATE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", "clk_rkvdec_hevc_ca_c", 25, 8), /* 9:15 Reserved */ /* CRU_CLKGATE_CON26 */ GATE(ACLK_BUS, "aclk_bus", "aclk_bus_sel", 26, 0), GATE(PCLK_BUS, "pclk_bus", "pclk_bus_sel", 26, 1), /* 2 aclk_bus_biu */ /* 3 pclk_bus_biu */ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 26, 4), GATE(CLK_TSADC_TSEN, "clk_tsadc_tsen", "clk_tsadc_tsen_c", 26, 5), GATE(CLK_TSADC, "clk_tsadc", "clk_tsadc_div", 26, 6), GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus", 26, 7), GATE(CLK_SARADC, 
"clk_saradc", "xin24m", 26, 8), GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "hclk_secure_flash", 26, 9), GATE(CLK_OTPC_NS_SBPI, "clk_otpc_ns_sbpi", "xin24m", 26, 10), GATE(CLK_OTPC_NS_USR, "clk_otpc_ns_usr", "xin_osc0_half", 26, 11), GATE(PCLK_SCR, "pclk_scr", "pclk_bus", 26, 12), GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus", 26, 13), GATE(TCLK_WDT_NS, "tclk_wdt_ns", "xin24m", 26, 14), /* 15 Reserved */ /* CRU_CLKGATE_CON27 */ /* 0 pclk_grf */ /* 1 pclk_grf_vccio12 */ /* 2 pclk_grf_vccio34 */ /* 3 pclk_grf_vccio567 */ GATE(PCLK_CAN0, "pclk_can0", "pclk_bus", 27, 5), GATE(CLK_CAN0, "clk_can0", "clk_can0_c", 27, 6), GATE(PCLK_CAN1, "pclk_can1", "pclk_bus", 27, 7), GATE(CLK_CAN1, "clk_can1", "clk_can1_c", 27, 8), GATE(PCLK_CAN2, "pclk_can2", "pclk_bus", 27, 9), GATE(CLK_CAN2, "clk_can2", "clk_can2_c", 27, 10), /* 11 Reserved */ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus", 27, 12), GATE(CLK_UART1_SRC, "clk_uart1_src", "clk_uart1_src_c", 27, 13), GATE(CLK_UART1_FRAC, "clk_uart1_frac", "clk_uart1_frac_frac", 27, 14), GATE(SCLK_UART1, "sclk_uart1", "sclk_uart1_sel", 27, 15), /* CRU_CLKGATE_CON28 */ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 28, 0), GATE(CLK_UART2_SRC, "clk_uart2_src", "clk_uart2_src_c", 28, 1), GATE(CLK_UART2_FRAC, "clk_uart2_frac", "clk_uart2_frac_frac", 28, 2), GATE(SCLK_UART2, "sclk_uart2", "sclk_uart2_sel", 28, 3), GATE(PCLK_UART3, "pclk_uart3", "pclk_bus", 28, 4), GATE(CLK_UART3_SRC, "clk_uart3_src", "clk_uart3_src_c", 28, 5), GATE(CLK_UART3_FRAC, "clk_uart3_frac", "clk_uart3_frac_frac", 28, 6), GATE(SCLK_UART3, "sclk_uart3", "sclk_uart3_sel", 28, 7), GATE(PCLK_UART4, "pclk_uart4", "pclk_bus", 28, 8), GATE(CLK_UART4_SRC, "clk_uart4_src", "clk_uart4_src_c", 28, 9), GATE(CLK_UART4_FRAC, "clk_uart4_frac", "clk_uart4_frac_frac", 28, 10), GATE(SCLK_UART4, "sclk_uart4", "sclk_uart4_sel", 28, 11), GATE(PCLK_UART5, "pclk_uart5", "pclk_bus", 28, 12), GATE(CLK_UART5_SRC, "clk_uart5_src", "clk_uart5_src_c", 28, 13), GATE(CLK_UART5_FRAC, "clk_uart5_frac", 
"clk_uart5_frac_frac", 28, 14), GATE(SCLK_UART5, "sclk_uart5", "sclk_uart5_sel", 28, 15), /* CRU_CLKGATE_CON29 */ GATE(PCLK_UART6, "pclk_uart6", "pclk_bus", 29, 0), GATE(CLK_UART6_SRC, "clk_uart6_src", "clk_uart6_src_c", 29, 1), GATE(CLK_UART6_FRAC, "clk_uart6_frac", "clk_uart6_frac_frac", 29, 2), GATE(SCLK_UART6, "sclk_uart6", "sclk_uart6_sel", 29, 3), GATE(PCLK_UART7, "pclk_uart7", "pclk_bus", 29, 4), GATE(CLK_UART7_SRC, "clk_uart7_src", "clk_uart7_src_c", 29, 5), GATE(CLK_UART7_FRAC, "clk_uart7_frac", "clk_uart7_frac_frac", 29, 6), GATE(SCLK_UART7, "sclk_uart7", "sclk_uart7_sel", 29, 7), GATE(PCLK_UART8, "pclk_uart8", "pclk_bus", 29, 8), GATE(CLK_UART8_SRC, "clk_uart8_src", "clk_uart8_src_c", 29, 9), GATE(CLK_UART8_FRAC, "clk_uart8_frac", "clk_uart8_frac_frac", 29, 10), GATE(SCLK_UART8, "sclk_uart8", "sclk_uart8_sel", 29, 11), GATE(PCLK_UART9, "pclk_uart9", "pclk_bus", 29, 12), GATE(CLK_UART9_SRC, "clk_uart9_src", "clk_uart9_src_c", 29, 13), GATE(CLK_UART9_FRAC, "clk_uart9_frac", "clk_uart9_frac_frac", 29, 14), GATE(SCLK_UART9, "sclk_uart9", "sclk_uart9_sel", 29, 15), /* CRU_CLKGATE_CON30 */ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 30, 0), GATE(CLK_I2C1, "clk_i2c1", "clk_i2c", 30, 1), GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 30, 2), GATE(CLK_I2C2, "clk_i2c2", "clk_i2c", 30, 3), GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 30, 4), GATE(CLK_I2C3, "clk_i2c3", "clk_i2c", 30, 5), GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus", 30, 6), GATE(CLK_I2C4, "clk_i2c4", "clk_i2c", 30, 7), GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus", 30, 8), GATE(CLK_I2C5, "clk_i2c5", "clk_i2c", 30, 9), GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus", 30, 10), GATE(CLK_SPI0, "clk_spi0", "clk_spi0_sel", 30, 11), GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus", 30, 12), GATE(CLK_SPI1, "clk_spi1", "clk_spi1_sel", 30, 13), GATE(PCLK_SPI2, "pclk_spi2", "pclk_bus", 30, 14), GATE(CLK_SPI2, "clk_spi2", "clk_spi2_sel", 30, 15), /* CRU_CLKGATE_CON31 */ GATE(PCLK_SPI3, "pclk_spi3", "pclk_bus", 31, 0), GATE(CLK_SPI3, "clk_spi3", 
"clk_spi3_sel", 31, 1), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus", 31, 2), GATE(DBCLK_GPIO1, "dbclk_gpio1", "dbclk_gpio", 31, 3), GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus", 31, 4), GATE(DBCLK_GPIO2, "dbclk_gpio2", "dbclk_gpio", 31, 5), GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus", 31, 6), GATE(DBCLK_GPIO3, "dbclk_gpio3", "dbclk_gpio", 31, 7), GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_bus", 31, 8), GATE(DBCLK_GPIO4, "dbclk_gpio4", "dbclk_gpio", 31, 9), GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 31, 10), GATE(CLK_PWM1, "clk_pwm1", "clk_pwm1_sel", 31, 11), GATE(CLK_PWM1_CAPTURE, "clk_pwm1_capture", "xin24m", 31, 12), GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus", 31, 13), GATE(CLK_PWM2, "clk_pwm2", "clk_pwm2_sel", 31, 14), GATE(CLK_PWM2_CAPTURE, "clk_pwm2_capture", "xin24m", 31, 15), /* CRU_CLKGATE_CON32 */ GATE(PCLK_PWM3, "pclk_pwm3", "pclk_bus", 32, 0), GATE(CLK_PWM3, "clk_pwm3", "clk_pwm3_sel", 32, 1), GATE(CLK_PWM3_CAPTURE, "clk_pwm3_capture", "xin24m", 32, 2), GATE(PCLK_TIMER, "pclk_timer", "pclk_bus", 32, 3), GATE(CLK_TIMER0, "clk_timer0", "xin24m", 32, 4), GATE(CLK_TIMER1, "clk_timer1", "xin24m", 32, 5), GATE(CLK_TIMER2, "clk_timer2", "xin24m", 32, 6), GATE(CLK_TIMER3, "clk_timer3", "xin24m", 32, 7), GATE(CLK_TIMER4, "clk_timer4", "xin24m", 32, 8), GATE(CLK_TIMER5, "clk_timer5", "xin24m", 32, 9), GATE(CLK_I2C, "clk_i2c", "clk_i2c_sel", 32, 10), GATE(DBCLK_GPIO, "dbclk_gpio", "dbclk_gpio_sel", 32, 11), /* 12 clk_timer */ GATE(ACLK_MCU, "aclk_mcu", "aclk_bus", 32, 13), GATE(PCLK_INTMUX, "pclk_intmux", "pclk_bus", 32, 14), GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 32, 15), /* CRU_CLKGATE_CON33 */ GATE(ACLK_TOP_HIGH, "aclk_top_high", "aclk_top_high_sel", 33, 0), GATE(ACLK_TOP_LOW, "aclk_top_low", "aclk_top_low_sel", 33, 1), GATE(HCLK_TOP, "hclk_top", "hclk_top_sel", 33, 2), GATE(PCLK_TOP, "pclk_top", "pclk_top_sel", 33, 3), /* 4 aclk_top_high_biu */ /* 5 aclk_top_low_biu */ /* 6 hclk_top_biu */ /* 7 pclk_top_biu */ GATE(PCLK_PCIE30PHY, "pclk_pcie30phy", "pclk_top", 33, 8), 
GATE(CLK_OPTC_ARB, "clk_optc_arb", "clk_optc_arb_sel", 33, 9), /* 10:11 Reserved */ /* 12 pclk_top_cru */ GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top", 33, 13), GATE(PCLK_MIPIDSIPHY0, "pclk_mipidsiphy0", "pclk_top", 33, 14), GATE(PCLK_MIPIDSIPHY1, "pclk_mipidsiphy1", "pclk_top", 33, 15), /* CRU_CLKGATE_CON34 */ /* 0 pclk_apb2asb_chip_left */ /* 1 pclk_apb2asb_chip_bottom */ /* 2 pclk_asb2apb_chip_left */ /* 3 pclk_asb2apb_chip_bottom */ GATE(PCLK_PIPEPHY0, "pclk_pipephy0", "pclk_top", 34, 4), GATE(PCLK_PIPEPHY1, "pclk_pipephy1", "pclk_top", 34, 5), GATE(PCLK_PIPEPHY2, "pclk_pipephy2", "pclk_top", 34, 6), /* 7 pclk_usb2phy0_grf */ /* 8 pclk_usb2phy1_grf */ /* 9 pclk_ddrphy */ /* 10 clk_ddrphy */ GATE(PCLK_CPU_BOOST, "pclk_cpu_boost", "pclk_top", 34, 11), GATE(CLK_CPU_BOOST, "clk_cpu_boost", "xin24m", 34, 12), GATE(PCLK_OTPPHY, "pclk_otpphy", "pclk_top", 34, 13), GATE(PCLK_EDPPHY_GRF, "pclk_edpphy_grf", "pclk_top", 34, 14), /* 15 clk_testout */ /* CRU_CLKGATE_CON35 */ GATE(0, "clk_gpll_div_400m", "clk_gpll_div_400m_div", 35, 0), GATE(0, "clk_gpll_div_300m", "clk_gpll_div_300m_div", 35, 1), GATE(0, "clk_gpll_div_200m", "clk_gpll_div_200m_div", 35, 2), GATE(0, "clk_gpll_div_150m", "clk_gpll_div_150m_div", 35, 3), GATE(0, "clk_gpll_div_100m", "clk_gpll_div_100m_div", 35, 4), GATE(0, "clk_gpll_div_75m", "clk_gpll_div_75m_div", 35, 5), GATE(0, "clk_gpll_div_20m", "clk_gpll_div_20m_div", 35, 6), GATE(CPLL_500M, "clk_cpll_div_500m", "clk_cpll_div_500m_div", 35, 7), GATE(CPLL_333M, "clk_cpll_div_333m", "clk_cpll_div_333m_div", 35, 8), GATE(CPLL_250M, "clk_cpll_div_250m", "clk_cpll_div_250m_div", 35, 9), GATE(CPLL_125M, "clk_cpll_div_125m", "clk_cpll_div_125m_div", 35, 10), GATE(CPLL_100M, "clk_cpll_div_100m", "clk_cpll_div_100m_div", 35, 11), GATE(CPLL_62P5M, "clk_cpll_div_62P5m", "clk_cpll_div_62P5m_div", 35, 12), GATE(CPLL_50M, "clk_cpll_div_50m", "clk_cpll_div_50m_div", 35, 13), GATE(CPLL_25M, "clk_cpll_div_25m", "clk_cpll_div_25m_div", 35, 14), GATE(0, 
"clk_osc0_div_750k", "clk_osc0_div_750k_div", 35, 15), }; static int rk3568_cru_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "rockchip,rk3568-cru")) { device_set_desc(dev, "Rockchip RK3568 Clock & Reset Unit"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int rk3568_cru_attach(device_t dev) { struct rk_cru_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->clks = rk3568_clks; sc->nclks = nitems(rk3568_clks); sc->gates = rk3568_gates; sc->ngates = nitems(rk3568_gates); sc->reset_offset = 0x400; sc->reset_num = 478; return (rk_cru_attach(dev)); } static device_method_t methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk3568_cru_probe), DEVMETHOD(device_attach, rk3568_cru_attach), DEVMETHOD_END }; DEFINE_CLASS_1(rk3568_cru, rk3568_cru_driver, methods, sizeof(struct rk_cru_softc), rk_cru_driver); EARLY_DRIVER_MODULE(rk3568_cru, simplebus, rk3568_cru_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/rockchip/rk3568_pmucru.c b/sys/dev/clk/rockchip/rk3568_pmucru.c index 0756dc5392bf..214253abb55e 100644 --- a/sys/dev/clk/rockchip/rk3568_pmucru.c +++ b/sys/dev/clk/rockchip/rk3568_pmucru.c @@ -1,248 +1,248 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, 2022 Soren Schmidt * Copyright (c) 2023, Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #define CRU_PLLSEL_CON(x) ((x) * 0x20) #define CRU_CLKSEL_CON(x) ((x) * 0x4 + 0x100) #define CRU_CLKGATE_CON(x) ((x) * 0x4 + 0x180) /* PLL clock */ #define RK_PLL(_id, _name, _pnames, _off, _shift) \ { \ .type = RK3328_CLK_PLL, \ .clk.pll = &(struct rk_clk_pll_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pnames, \ .clkdef.parent_cnt = nitems(_pnames), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .base_offset = CRU_PLLSEL_CON(_off), \ .mode_reg = 0x80, \ .mode_shift = _shift, \ .rates = rk3568_pll_rates, \ }, \ } extern struct rk_clk_pll_rate rk3568_pll_rates[]; /* Parent clock defines */ PLIST(mux_pll_p) = { "xin24m" }; PLIST(xin24m_32k_p) = { "xin24m", "clk_rtc_32k" }; PLIST(sclk_uart0_p) = { "sclk_uart0_div", "sclk_uart0_frac", "xin24m" }; PLIST(sclk_uart0_div_p) = { "ppll", "usb480m", "cpll", "gpll" }; PLIST(clk_rtc32k_pmu_p) = { "clk_32k_pvtm", "xin32k", "clk_osc0_div32k" }; PLIST(clk_usbphy0_ref_p) = { "clk_ref24m", "xin_osc0_usbphy0_g" }; PLIST(clk_usbphy1_ref_p) = { "clk_ref24m", "xin_osc0_usbphy1_g" }; PLIST(clk_mipidsiphy0_ref_p) = { 
"clk_ref24m", "xin_osc0_mipidsiphy0_g" }; PLIST(clk_mipidsiphy1_ref_p) = { "clk_ref24m", "xin_osc0_mipidsiphy1_g" }; PLIST(clk_wifi_p) = { "clk_wifi_osc0", "clk_wifi_div" }; PLIST(clk_pciephy0_ref_p) = { "clk_pciephy0_osc0", "clk_pciephy0_div" }; PLIST(clk_pciephy1_ref_p) = { "clk_pciephy1_osc0", "clk_pciephy1_div" }; PLIST(clk_pciephy2_ref_p) = { "clk_pciephy2_osc0", "clk_pciephy2_div" }; PLIST(clk_hdmi_ref_p) = { "hpll", "hpll_ph0" }; PLIST(clk_pdpmu_p) = { "ppll", "gpll" }; PLIST(clk_pwm0_p) = { "xin24m", "clk_pdpmu" }; /* CLOCKS */ static struct rk_clk rk3568_clks[] = { /* External clocks */ LINK("xin24m"), LINK("cpll"), LINK("gpll"), LINK("usb480m"), LINK("clk_32k_pvtm"), /* Fixed clocks */ FFACT(0, "ppll_ph0", "ppll", 1, 2), FFACT(0, "ppll_ph180", "ppll", 1, 2), FFACT(0, "hpll_ph0", "hpll", 1, 2), /* PLL's */ RK_PLL(PLL_PPLL, "ppll", mux_pll_p, 0, 0), RK_PLL(PLL_HPLL, "hpll", mux_pll_p, 2, 2), /* PMUCRU_PMUCLKSEL_CON00 */ CDIV(0, "xin_osc0_div_div", "xin24m", 0, 0, 0, 5), MUX(0, "clk_rtc_32k_mux", clk_rtc32k_pmu_p, 0, 0, 6, 2), /* PMUCRU_PMUCLKSEL_CON01 */ FRACT(0, "clk_osc0_div32k", "xin24m", 0, 1), /* PMUCRU_PMUCLKSEL_CON02 */ CDIV(0, "pclk_pdpmu_pre", "clk_pdpmu", 0, 2, 0, 5), MUX(CLK_PDPMU, "clk_pdpmu", clk_pdpmu_p, 0, 2, 15, 1), /* PMUCRU_PMUCLKSEL_CON03 */ CDIV(0, "clk_i2c0_div", "clk_pdpmu", 0, 3, 0, 7), /* PMUCRU_PMUCLKSEL_CON04 */ CDIV(0, "sclk_uart0_div_div", "sclk_uart0_div_sel", 0, 4, 0, 7), MUX(0, "sclk_uart0_div_sel", sclk_uart0_div_p, 0, 4, 8, 2), MUX(0, "sclk_uart0_mux", sclk_uart0_p, 0, 4, 10, 2), /* PMUCRU_PMUCLKSEL_CON05 */ FRACT(0, "sclk_uart0_frac_div", "sclk_uart0_div", 0, 5), /* PMUCRU_PMUCLKSEL_CON06 */ CDIV(0, "clk_pwm0_div", "clk_pwm0_sel", 0, 6, 0, 7), MUX(0, "clk_pwm0_sel", clk_pwm0_p, 0, 6, 7, 1), MUX(0, "dbclk_gpio0_sel", xin24m_32k_p, 0, 6, 15, 1), /* PMUCRU_PMUCLKSEL_CON07 */ CDIV(0, "clk_ref24m_div", "clk_pdpmu", 0, 7, 0, 6), /* PMUCRU_PMUCLKSEL_CON08 */ MUX(CLK_USBPHY0_REF, "clk_usbphy0_ref", clk_usbphy0_ref_p, 0, 8, 0, 1), 
MUX(CLK_USBPHY1_REF, "clk_usbphy1_ref", clk_usbphy1_ref_p, 0, 8, 1, 1), MUX(CLK_MIPIDSIPHY0_REF, "clk_mipidsiphy0_ref", clk_mipidsiphy0_ref_p, 0, 8, 2, 1), MUX(CLK_MIPIDSIPHY1_REF, "clk_mipidsiphy1_ref", clk_mipidsiphy1_ref_p, 0, 8, 3, 1), MUX(CLK_HDMI_REF, "clk_hdmi_ref", clk_hdmi_ref_p, 0, 8, 7, 1), CDIV(0, "clk_wifi_div_div", "clk_pdpmu", 0, 8, 8, 6), MUX(CLK_WIFI, "clk_wifi", clk_wifi_p, 0, 8, 15, 1), /* PMUCRU_PMUCLKSEL_CON09 */ CDIV(0, "clk_pciephy0_div_div", "ppll_ph0", 0, 9, 0, 3), MUX(CLK_PCIEPHY0_REF, "clk_pciephy0_ref", clk_pciephy0_ref_p, 0, 9, 3, 1), CDIV(0, "clk_pciephy1_div_div", "ppll_ph0", 0, 9, 4, 3), MUX(CLK_PCIEPHY1_REF, "clk_pciephy1_ref", clk_pciephy1_ref_p, 0, 9, 7, 1), CDIV(0, "clk_pciephy2_div_div", "ppll_ph0", 0, 9, 8, 3), MUX(CLK_PCIEPHY2_REF, "clk_pciephy2_ref", clk_pciephy2_ref_p, 0, 9, 11, 1), }; /* GATES */ static struct rk_cru_gate rk3568_gates[] = { /* PMUCRU_PMUGATE_CON00 */ GATE(XIN_OSC0_DIV, "xin_osc0_div", "xin_osc0_div_div", 0, 0), GATE(CLK_RTC_32K, "clk_rtc_32k", "clk_rtc_32k_mux", 0, 1), GATE(PCLK_PDPMU, "pclk_pdpmu", "pclk_pdpmu_pre", 0, 2), GATE(PCLK_PMU, "pclk_pmu", "pclk_pdpmu", 0, 6), GATE(CLK_PMU, "clk_pmu", "xin24m", 0, 7), /* PMUCRU_PMUGATE_CON01 */ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pdpmu", 1, 0), GATE(CLK_I2C0, "clk_i2c0", "clk_i2c0_div", 1, 1), GATE(PCLK_UART0, "pclk_uart0", "pclk_pdpmu", 1, 2), GATE(CLK_UART0_DIV, "sclk_uart0_div", "sclk_uart0_div_div", 1, 3), GATE(CLK_UART0_FRAC, "sclk_uart0_frac", "sclk_uart0_frac_div", 1, 4), GATE(SCLK_UART0, "sclk_uart0", "sclk_uart0_mux", 1, 5), GATE(PCLK_PWM0, "pclk_pwm0", "pclk_pdpmu", 1, 6), GATE(CLK_PWM0, "clk_pwm0", "clk_pwm0_div", 1, 7), GATE(CLK_CAPTURE_PWM0_NDFT, "clk_capture_pwm0_ndft", "xin24m", 1, 8), GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pdpmu", 1, 9), GATE(DBCLK_GPIO0, "dbclk_gpio0", "dbclk_gpio0_sel", 1, 10), GATE(PCLK_PMUPVTM, "pclk_pmupvtm", "pclk_pdpmu", 1, 11), GATE(CLK_PMUPVTM, "clk_pmupvtm", "xin24m", 1, 12), GATE(CLK_CORE_PMUPVTM, "clk_core_pmupvtm", 
"xin24m", 1, 13), /* PMUCRU_PMUGATE_CON02 */ GATE(CLK_REF24M, "clk_ref24m", "clk_ref24m_div", 2, 0), GATE(XIN_OSC0_USBPHY0_G, "xin_osc0_usbphy0_g", "xin24m", 2, 1), GATE(XIN_OSC0_USBPHY1_G, "xin_osc0_usbphy1_g", "xin24m", 2, 2), GATE(XIN_OSC0_MIPIDSIPHY0_G, "xin_osc0_mipidsiphy0_g", "xin24m", 2, 3), GATE(XIN_OSC0_MIPIDSIPHY1_G, "xin_osc0_mipidsiphy1_g", "xin24m", 2, 4), GATE(CLK_WIFI_DIV, "clk_wifi_div", "clk_wifi_div_div", 2, 5), GATE(CLK_WIFI_OSC0, "clk_wifi_osc0", "xin24m", 2, 6), GATE(CLK_PCIEPHY0_DIV, "clk_pciephy0_div", "clk_pciephy0_div_div", 2, 7), GATE(CLK_PCIEPHY0_OSC0, "clk_pciephy0_osc0", "xin24m", 2, 8), GATE(CLK_PCIEPHY1_DIV, "clk_pciephy1_div", "clk_pciephy1_div_div", 2, 9), GATE(CLK_PCIEPHY1_OSC0, "clk_pciephy1_osc0", "xin24m", 2, 10), GATE(CLK_PCIEPHY2_DIV, "clk_pciephy2_div", "clk_pciephy2_div_div", 2, 11), GATE(CLK_PCIEPHY2_OSC0, "clk_pciephy2_osc0", "xin24m", 2, 12), GATE(CLK_PCIE30PHY_REF_M, "clk_pcie30phy_ref_m", "ppll_ph0", 2, 13), GATE(CLK_PCIE30PHY_REF_N, "clk_pcie30phy_ref_n", "ppll_ph180", 2, 14), GATE(XIN_OSC0_EDPPHY_G, "xin_osc0_edpphy_g", "xin24m", 2, 15), }; static int rk3568_pmucru_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "rockchip,rk3568-pmucru")) { device_set_desc(dev, "Rockchip RK3568 PMU Clock & Reset Unit"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int rk3568_pmucru_attach(device_t dev) { struct rk_cru_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->clks = rk3568_clks; sc->nclks = nitems(rk3568_clks); sc->gates = rk3568_gates; sc->ngates = nitems(rk3568_gates); sc->reset_offset = 0x200; sc->reset_num = 4; return (rk_cru_attach(dev)); } static device_method_t methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk3568_pmucru_probe), DEVMETHOD(device_attach, rk3568_pmucru_attach), DEVMETHOD_END }; DEFINE_CLASS_1(rk3568_pmucru, rk3568_pmucru_driver, methods, sizeof(struct rk_cru_softc), rk_cru_driver); EARLY_DRIVER_MODULE(rk3568_pmucru, 
simplebus, rk3568_pmucru_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/clk/rockchip/rk_clk_armclk.c b/sys/dev/clk/rockchip/rk_clk_armclk.c index 3d462e74d92b..36197166d213 100644 --- a/sys/dev/clk/rockchip/rk_clk_armclk.c +++ b/sys/dev/clk/rockchip/rk_clk_armclk.c @@ -1,252 +1,252 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include "clkdev_if.h" struct rk_clk_armclk_sc { uint32_t muxdiv_offset; uint32_t mux_shift; uint32_t mux_width; uint32_t mux_mask; uint32_t div_shift; uint32_t div_width; uint32_t div_mask; uint32_t gate_offset; uint32_t gate_shift; uint32_t flags; uint32_t main_parent; uint32_t alt_parent; struct rk_clk_armclk_rates *rates; int nrates; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define RK_ARMCLK_WRITE_MASK_SHIFT 16 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) #endif static int rk_clk_armclk_init(struct clknode *clk, device_t dev) { struct rk_clk_armclk_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); idx = 0; DEVICE_LOCK(clk); READ4(clk, sc->muxdiv_offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; clknode_init_parent_idx(clk, idx); return (0); } static int rk_clk_armclk_set_mux(struct clknode *clk, int index) { struct rk_clk_armclk_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); dprintf("Set mux to %d\n", index); DEVICE_LOCK(clk); val |= index << sc->mux_shift; val |= sc->mux_mask << RK_ARMCLK_WRITE_MASK_SHIFT; dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val); WRITE4(clk, sc->muxdiv_offset, val); DEVICE_UNLOCK(clk); return (0); } static int rk_clk_armclk_recalc(struct clknode *clk, uint64_t *freq) { struct rk_clk_armclk_sc *sc; uint32_t reg, div; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->muxdiv_offset, ®); dprintf("Read: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, reg); DEVICE_UNLOCK(clk); div = ((reg & sc->div_mask) >> sc->div_shift) + 1; 
dprintf("parent_freq=%ju, div=%u\n", *freq, div); *freq = *freq / div; return (0); } static int rk_clk_armclk_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_armclk_sc *sc; struct clknode *p_main; const char **p_names; uint64_t best = 0, best_p = 0; uint32_t div = 0, val = 0; int err, i, rate = 0; sc = clknode_get_softc(clk); dprintf("Finding best parent/div for target freq of %ju\n", *fout); p_names = clknode_get_parent_names(clk); p_main = clknode_find_by_name(p_names[sc->main_parent]); for (i = 0; i < sc->nrates; i++) { if (sc->rates[i].freq == *fout) { best = sc->rates[i].freq; div = sc->rates[i].div; best_p = best * div; rate = i; dprintf("Best parent %s (%d) with best freq at %ju\n", clknode_get_name(p_main), sc->main_parent, best); break; } } if (rate == sc->nrates) return (0); if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; *stop = 1; return (0); } dprintf("Changing parent (%s) freq to %ju\n", clknode_get_name(p_main), best_p); err = clknode_set_freq(p_main, best_p, 0, 1); if (err != 0) printf("Cannot set %s to %ju\n", clknode_get_name(p_main), best_p); clknode_set_parent_by_idx(clk, sc->main_parent); clknode_get_freq(p_main, &best_p); dprintf("main parent freq at %ju\n", best_p); DEVICE_LOCK(clk); val |= (div - 1) << sc->div_shift; val |= sc->div_mask << RK_ARMCLK_WRITE_MASK_SHIFT; dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val); WRITE4(clk, sc->muxdiv_offset, val); DEVICE_UNLOCK(clk); *fout = best; *stop = 1; return (0); } static clknode_method_t rk_clk_armclk_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_clk_armclk_init), CLKNODEMETHOD(clknode_set_mux, rk_clk_armclk_set_mux), CLKNODEMETHOD(clknode_recalc_freq, rk_clk_armclk_recalc), CLKNODEMETHOD(clknode_set_freq, rk_clk_armclk_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_clk_armclk_clknode, rk_clk_armclk_clknode_class, rk_clk_armclk_clknode_methods, sizeof(struct rk_clk_armclk_sc), 
clknode_class); int rk_clk_armclk_register(struct clkdom *clkdom, struct rk_clk_armclk_def *clkdef) { struct clknode *clk; struct rk_clk_armclk_sc *sc; clk = clknode_create(clkdom, &rk_clk_armclk_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->muxdiv_offset = clkdef->muxdiv_offset; sc->mux_shift = clkdef->mux_shift; sc->mux_width = clkdef->mux_width; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->div_shift = clkdef->div_shift; sc->div_width = clkdef->div_width; sc->div_mask = ((1 << clkdef->div_width) - 1) << sc->div_shift; sc->flags = clkdef->flags; sc->main_parent = clkdef->main_parent; sc->alt_parent = clkdef->alt_parent; sc->rates = clkdef->rates; sc->nrates = clkdef->nrates; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_armclk.h b/sys/dev/clk/rockchip/rk_clk_armclk.h index 0c7f91d21d60..d5d14f1e8449 100644 --- a/sys/dev/clk/rockchip/rk_clk_armclk.h +++ b/sys/dev/clk/rockchip/rk_clk_armclk.h @@ -1,61 +1,61 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _RK_CLK_ARMCLK_H_ #define _RK_CLK_ARMCLK_H_ -#include +#include struct rk_clk_armclk_rates { uint64_t freq; uint32_t div; }; struct rk_clk_armclk_def { struct clknode_init_def clkdef; uint32_t muxdiv_offset; uint32_t mux_shift; uint32_t mux_width; uint32_t div_shift; uint32_t div_width; uint32_t flags; uint32_t main_parent; uint32_t alt_parent; struct rk_clk_armclk_rates *rates; int nrates; }; int rk_clk_armclk_register(struct clkdom *clkdom, struct rk_clk_armclk_def *clkdef); #endif /* _RK_CLK_ARMCLK_H_ */ diff --git a/sys/dev/clk/rockchip/rk_clk_composite.c b/sys/dev/clk/rockchip/rk_clk_composite.c index a37ddcd6ecb2..2b6c889913b8 100644 --- a/sys/dev/clk/rockchip/rk_clk_composite.c +++ b/sys/dev/clk/rockchip/rk_clk_composite.c @@ -1,334 +1,334 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include #include "clkdev_if.h" #include "syscon_if.h" struct rk_clk_composite_sc { uint32_t muxdiv_offset; uint32_t mux_shift; uint32_t mux_width; uint32_t mux_mask; uint32_t div_shift; uint32_t div_width; uint32_t div_mask; uint32_t flags; struct syscon *grf; }; #define WRITE4(_clk, off, val) \ rk_clk_composite_write_4(_clk, off, val) #define READ4(_clk, off, val) \ rk_clk_composite_read_4(_clk, off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define RK_CLK_COMPOSITE_MASK_SHIFT 16 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) 
#endif static void rk_clk_composite_read_4(struct clknode *clk, bus_addr_t addr, uint32_t *val) { struct rk_clk_composite_sc *sc; sc = clknode_get_softc(clk); if (sc->grf) *val = SYSCON_READ_4(sc->grf, addr); else CLKDEV_READ_4(clknode_get_device(clk), addr, val); } static void rk_clk_composite_write_4(struct clknode *clk, bus_addr_t addr, uint32_t val) { struct rk_clk_composite_sc *sc; sc = clknode_get_softc(clk); if (sc->grf) SYSCON_WRITE_4(sc->grf, addr, val | (0xffff << 16)); else CLKDEV_WRITE_4(clknode_get_device(clk), addr, val); } static struct syscon * rk_clk_composite_get_grf(struct clknode *clk) { device_t dev; phandle_t node; struct syscon *grf; grf = NULL; dev = clknode_get_device(clk); node = ofw_bus_get_node(dev); if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &grf) != 0) { return (NULL); } return (grf); } static int rk_clk_composite_init(struct clknode *clk, device_t dev) { struct rk_clk_composite_sc *sc; uint32_t val, idx; sc = clknode_get_softc(clk); if ((sc->flags & RK_CLK_COMPOSITE_GRF) != 0) { sc->grf = rk_clk_composite_get_grf(clk); if (sc->grf == NULL) panic("clock %s has GRF flag set but no syscon is available", clknode_get_name(clk)); } idx = 0; if ((sc->flags & RK_CLK_COMPOSITE_HAVE_MUX) != 0) { DEVICE_LOCK(clk); READ4(clk, sc->muxdiv_offset, &val); DEVICE_UNLOCK(clk); idx = (val & sc->mux_mask) >> sc->mux_shift; } clknode_init_parent_idx(clk, idx); return (0); } static int rk_clk_composite_set_mux(struct clknode *clk, int index) { struct rk_clk_composite_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); if ((sc->flags & RK_CLK_COMPOSITE_HAVE_MUX) == 0) return (0); dprintf("Set mux to %d\n", index); DEVICE_LOCK(clk); val |= (index << sc->mux_shift); val |= sc->mux_mask << RK_CLK_COMPOSITE_MASK_SHIFT; dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val); WRITE4(clk, sc->muxdiv_offset, val); DEVICE_UNLOCK(clk); return (0); } static int rk_clk_composite_recalc(struct clknode 
*clk, uint64_t *freq) { struct rk_clk_composite_sc *sc; uint32_t reg, div; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->muxdiv_offset, ®); dprintf("Read: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, reg); DEVICE_UNLOCK(clk); div = ((reg & sc->div_mask) >> sc->div_shift); if (sc->flags & RK_CLK_COMPOSITE_DIV_EXP) div = 1 << div; else div += 1; dprintf("parent_freq=%ju, div=%u\n", *freq, div); *freq = *freq / div; dprintf("Final freq=%ju\n", *freq); return (0); } static uint32_t rk_clk_composite_find_best(struct rk_clk_composite_sc *sc, uint64_t fparent, uint64_t freq, uint32_t *reg) { uint64_t best, cur; uint32_t best_div, best_div_reg; uint32_t div, div_reg; best = 0; best_div = 0; best_div_reg = 0; for (div_reg = 0; div_reg <= ((sc->div_mask >> sc->div_shift) + 1); div_reg++) { if (sc->flags == RK_CLK_COMPOSITE_DIV_EXP) div = 1 << div_reg; else div = div_reg + 1; cur = fparent / div; if ((freq - cur) < (freq - best)) { best = cur; best_div = div; best_div_reg = div_reg; break; } } *reg = best_div_reg; return (best_div); } static int rk_clk_composite_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_composite_sc *sc; struct clknode *p_clk; const char **p_names; uint64_t best, cur; uint32_t div, div_reg, best_div, best_div_reg, val; int p_idx, best_parent; sc = clknode_get_softc(clk); dprintf("Finding best parent/div for target freq of %ju\n", *fout); p_names = clknode_get_parent_names(clk); for (best_div = 0, best = 0, p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) { p_clk = clknode_find_by_name(p_names[p_idx]); clknode_get_freq(p_clk, &fparent); dprintf("Testing with parent %s (%d) at freq %ju\n", clknode_get_name(p_clk), p_idx, fparent); div = rk_clk_composite_find_best(sc, fparent, *fout, &div_reg); cur = fparent / div; if ((*fout - cur) < (*fout - best)) { best = cur; best_div = div; best_div_reg = div_reg; best_parent = p_idx; dprintf("Best parent so far %s (%d) with best freq 
at " "%ju\n", clknode_get_name(p_clk), p_idx, best); } } *stop = 1; if (best_div == 0) return (ERANGE); if ((best < *fout) && ((flags & CLK_SET_ROUND_DOWN) == 0)) return (ERANGE); if ((best > *fout) && ((flags & CLK_SET_ROUND_UP) == 0)) { return (ERANGE); } if ((flags & CLK_SET_DRYRUN) != 0) { *fout = best; return (0); } p_idx = clknode_get_parent_idx(clk); if (p_idx != best_parent) { dprintf("Switching parent index from %d to %d\n", p_idx, best_parent); clknode_set_parent_by_idx(clk, best_parent); } dprintf("Setting divider to %d (reg: %d)\n", best_div, best_div_reg); dprintf(" div_mask: 0x%X, div_shift: %d\n", sc->div_mask, sc->div_shift); DEVICE_LOCK(clk); val = best_div_reg << sc->div_shift; val |= sc->div_mask << RK_CLK_COMPOSITE_MASK_SHIFT; dprintf("Write: muxdiv_offset=%x, val=%x\n", sc->muxdiv_offset, val); WRITE4(clk, sc->muxdiv_offset, val); DEVICE_UNLOCK(clk); *fout = best; return (0); } static clknode_method_t rk_clk_composite_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_clk_composite_init), CLKNODEMETHOD(clknode_set_mux, rk_clk_composite_set_mux), CLKNODEMETHOD(clknode_recalc_freq, rk_clk_composite_recalc), CLKNODEMETHOD(clknode_set_freq, rk_clk_composite_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_clk_composite_clknode, rk_clk_composite_clknode_class, rk_clk_composite_clknode_methods, sizeof(struct rk_clk_composite_sc), clknode_class); int rk_clk_composite_register(struct clkdom *clkdom, struct rk_clk_composite_def *clkdef) { struct clknode *clk; struct rk_clk_composite_sc *sc; clk = clknode_create(clkdom, &rk_clk_composite_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->muxdiv_offset = clkdef->muxdiv_offset; sc->mux_shift = clkdef->mux_shift; sc->mux_width = clkdef->mux_width; sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift; sc->div_shift = clkdef->div_shift; sc->div_width = clkdef->div_width; sc->div_mask = ((1 << clkdef->div_width) - 1) << 
sc->div_shift; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_composite.h b/sys/dev/clk/rockchip/rk_clk_composite.h index 22a052da22c2..346bee53f04f 100644 --- a/sys/dev/clk/rockchip/rk_clk_composite.h +++ b/sys/dev/clk/rockchip/rk_clk_composite.h @@ -1,54 +1,54 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _RK_CLK_COMPOSITE_H_ #define _RK_CLK_COMPOSITE_H_ -#include +#include struct rk_clk_composite_def { struct clknode_init_def clkdef; uint32_t muxdiv_offset; uint32_t mux_shift; uint32_t mux_width; uint32_t div_shift; uint32_t div_width; uint32_t flags; }; #define RK_CLK_COMPOSITE_HAVE_MUX 0x0001 #define RK_CLK_COMPOSITE_DIV_EXP 0x0002 /* Register 0, 1, 2, 2, ... */ /* Divider 1, 2, 4, 8, ... */ #define RK_CLK_COMPOSITE_GRF 0x0004 /* Use syscon registers instead of CRU's */ int rk_clk_composite_register(struct clkdom *clkdom, struct rk_clk_composite_def *clkdef); #endif /* _RK_CLK_COMPOSITE_H_ */ diff --git a/sys/dev/clk/rockchip/rk_clk_fract.c b/sys/dev/clk/rockchip/rk_clk_fract.c index f559e9c71852..aa7084c90d76 100644 --- a/sys/dev/clk/rockchip/rk_clk_fract.c +++ b/sys/dev/clk/rockchip/rk_clk_fract.c @@ -1,277 +1,277 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include -#include +#include #include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define RK_CLK_FRACT_MASK_SHIFT 16 static int rk_clk_fract_init(struct clknode *clk, device_t dev); static int rk_clk_fract_recalc(struct clknode *clk, uint64_t *req); static int rk_clk_fract_set_freq(struct clknode *clknode, uint64_t fin, uint64_t *fout, int flag, int *stop); static int rk_clk_fract_set_gate(struct clknode *clk, bool enable); struct rk_clk_fract_sc { uint32_t flags; uint32_t offset; uint32_t numerator; uint32_t denominator; uint32_t gate_offset; uint32_t gate_shift; }; static clknode_method_t rk_clk_fract_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_clk_fract_init), CLKNODEMETHOD(clknode_set_gate, rk_clk_fract_set_gate), CLKNODEMETHOD(clknode_recalc_freq, rk_clk_fract_recalc), CLKNODEMETHOD(clknode_set_freq, rk_clk_fract_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_clk_fract, rk_clk_fract_class, rk_clk_fract_methods, sizeof(struct rk_clk_fract_sc), clknode_class); /* * Compute best rational approximation of input fraction * for fixed 
sized fractional divider registers. * http://en.wikipedia.org/wiki/Continued_fraction * * - n_input, d_input Given input fraction * - n_max, d_max Maximum vaues of divider registers * - n_out, d_out Computed approximation */ static void clk_compute_fract_div( uint64_t n_input, uint64_t d_input, uint64_t n_max, uint64_t d_max, uint64_t *n_out, uint64_t *d_out) { uint64_t n_prev, d_prev; /* previous convergents */ uint64_t n_cur, d_cur; /* current convergents */ uint64_t n_rem, d_rem; /* fraction remainder */ uint64_t tmp, fact; /* Initialize fraction reminder */ n_rem = n_input; d_rem = d_input; /* Init convergents to 0/1 and 1/0 */ n_prev = 0; d_prev = 1; n_cur = 1; d_cur = 0; while (d_rem != 0 && n_cur < n_max && d_cur < d_max) { /* Factor for this step. */ fact = n_rem / d_rem; /* Adjust fraction reminder */ tmp = d_rem; d_rem = n_rem % d_rem; n_rem = tmp; /* Compute new nominator and save last one */ tmp = n_prev + fact * n_cur; n_prev = n_cur; n_cur = tmp; /* Compute new denominator and save last one */ tmp = d_prev + fact * d_cur; d_prev = d_cur; d_cur = tmp; } if (n_cur > n_max || d_cur > d_max) { *n_out = n_prev; *d_out = d_prev; } else { *n_out = n_cur; *d_out = d_cur; } } static int rk_clk_fract_init(struct clknode *clk, device_t dev) { uint32_t reg; struct rk_clk_fract_sc *sc; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); sc->numerator = (reg >> 16) & 0xFFFF; sc->denominator = reg & 0xFFFF; if (sc->denominator == 0) sc->denominator = 1; clknode_init_parent_idx(clk, 0); return(0); } static int rk_clk_fract_set_gate(struct clknode *clk, bool enable) { struct rk_clk_fract_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); if ((sc->flags & RK_CLK_FRACT_HAVE_GATE) == 0) return (0); RD4(clk, sc->gate_offset, &val); val = 0; if (!enable) val |= 1 << sc->gate_shift; val |= (1 << sc->gate_shift) << RK_CLK_FRACT_MASK_SHIFT; DEVICE_LOCK(clk); WR4(clk, sc->gate_offset, val); DEVICE_UNLOCK(clk); return (0); } static 
int rk_clk_fract_recalc(struct clknode *clk, uint64_t *freq) { struct rk_clk_fract_sc *sc; sc = clknode_get_softc(clk); if (sc->denominator == 0) { printf("%s: %s denominator is zero!\n", clknode_get_name(clk), __func__); *freq = 0; return(EINVAL); } *freq *= sc->numerator; *freq /= sc->denominator; return (0); } static int rk_clk_fract_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct rk_clk_fract_sc *sc; uint64_t div_n, div_d, _fout; sc = clknode_get_softc(clk); clk_compute_fract_div(*fout, fin, 0xFFFF, 0xFFFF, &div_n, &div_d); _fout = fin * div_n; _fout /= div_d; /* Rounding. */ if ((flags & CLK_SET_ROUND_UP) && (_fout < *fout)) { if (div_n > div_d && div_d > 1) div_n++; else div_d--; } else if ((flags & CLK_SET_ROUND_DOWN) && (_fout > *fout)) { if (div_n > div_d && div_n > 1) div_n--; else div_d++; } /* Check range after rounding */ if (div_n > 0xFFFF || div_d > 0xFFFF) return (ERANGE); if (div_d == 0) { printf("%s: %s divider is zero!\n", clknode_get_name(clk), __func__); return(EINVAL); } /* Recompute final output frequency */ _fout = fin * div_n; _fout /= div_d; *stop = 1; if ((flags & CLK_SET_DRYRUN) == 0) { if (*stop != 0 && (flags & (CLK_SET_ROUND_UP | CLK_SET_ROUND_DOWN)) == 0 && *fout != _fout) return (ERANGE); sc->numerator = (uint32_t)div_n; sc->denominator = (uint32_t)div_d; DEVICE_LOCK(clk); WR4(clk, sc->offset, sc->numerator << 16 | sc->denominator); DEVICE_UNLOCK(clk); } *fout = _fout; return (0); } int rk_clk_fract_register(struct clkdom *clkdom, struct rk_clk_fract_def *clkdef) { struct clknode *clk; struct rk_clk_fract_sc *sc; clk = clknode_create(clkdom, &rk_clk_fract_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->flags = clkdef->flags; sc->offset = clkdef->offset; sc->gate_offset = clkdef->gate_offset; sc->gate_shift = clkdef->gate_shift; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_fract.h 
b/sys/dev/clk/rockchip/rk_clk_fract.h index 6923f162f265..57f539d8d246 100644 --- a/sys/dev/clk/rockchip/rk_clk_fract.h +++ b/sys/dev/clk/rockchip/rk_clk_fract.h @@ -1,46 +1,46 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _RK_CLK_FRACT_H_ #define _RK_CLK_FRACT_H_ -#include +#include struct rk_clk_fract_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t gate_offset; uint32_t gate_shift; uint32_t flags; }; #define RK_CLK_FRACT_HAVE_GATE 0x0001 int rk_clk_fract_register(struct clkdom *clkdom, struct rk_clk_fract_def *clkdef); #endif /* _RK_CLK_FRACT_H_ */ diff --git a/sys/dev/clk/rockchip/rk_clk_gate.c b/sys/dev/clk/rockchip/rk_clk_gate.c index 053236d043e6..911e4cbad2c9 100644 --- a/sys/dev/clk/rockchip/rk_clk_gate.c +++ b/sys/dev/clk/rockchip/rk_clk_gate.c @@ -1,132 +1,132 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include "clkdev_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) static int rk_clk_gate_init(struct clknode *clk, device_t dev); static int rk_clk_gate_set_gate(struct clknode *clk, bool enable); struct rk_clk_gate_sc { uint32_t offset; uint32_t shift; uint32_t mask; uint32_t on_value; uint32_t off_value; int gate_flags; bool ungated; }; static clknode_method_t rk_clk_gate_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_clk_gate_init), CLKNODEMETHOD(clknode_set_gate, rk_clk_gate_set_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_clk_gate, rk_clk_gate_class, rk_clk_gate_methods, sizeof(struct rk_clk_gate_sc), clknode_class); static int rk_clk_gate_init(struct clknode *clk, device_t dev) { uint32_t reg; struct rk_clk_gate_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); rv = RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); if (rv != 0) return (rv); reg = (reg >> sc->shift) & sc->mask; sc->ungated = reg == sc->on_value ? 1 : 0; clknode_init_parent_idx(clk, 0); return(0); } static int rk_clk_gate_set_gate(struct clknode *clk, bool enable) { uint32_t reg; struct rk_clk_gate_sc *sc; int rv; sc = clknode_get_softc(clk); sc->ungated = enable; DEVICE_LOCK(clk); rv = MD4(clk, sc->offset, sc->mask << sc->shift, ((sc->ungated ? 
sc->on_value : sc->off_value) << sc->shift) | RK_CLK_GATE_MASK); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } RD4(clk, sc->offset, ®); DEVICE_UNLOCK(clk); return(0); } int rk_clk_gate_register(struct clkdom *clkdom, struct rk_clk_gate_def *clkdef) { struct clknode *clk; struct rk_clk_gate_sc *sc; clk = clknode_create(clkdom, &rk_clk_gate_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = clkdef->mask; sc->on_value = clkdef->on_value; sc->off_value = clkdef->off_value; sc->gate_flags = clkdef->gate_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_gate.h b/sys/dev/clk/rockchip/rk_clk_gate.h index add5cd22aa89..2282f7c19284 100644 --- a/sys/dev/clk/rockchip/rk_clk_gate.h +++ b/sys/dev/clk/rockchip/rk_clk_gate.h @@ -1,47 +1,47 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _RK_CLK_GATE_H_ #define _RK_CLK_GATE_H_ -#include +#include struct rk_clk_gate_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t mask; uint32_t on_value; uint32_t off_value; int gate_flags; }; #define RK_CLK_GATE_MASK 0xFFFF0000 int rk_clk_gate_register(struct clkdom *clkdom, struct rk_clk_gate_def *clkdef); #endif /* _RK_CLK_GATE_H_ */ diff --git a/sys/dev/clk/rockchip/rk_clk_mux.c b/sys/dev/clk/rockchip/rk_clk_mux.c index e42d5819378a..fba57f794d16 100644 --- a/sys/dev/clk/rockchip/rk_clk_mux.c +++ b/sys/dev/clk/rockchip/rk_clk_mux.c @@ -1,239 +1,239 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include #include #include "clkdev_if.h" #include "syscon_if.h" #define WR4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define RD4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define MD4(_clk, off, clr, set ) \ CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) 
#endif static int rk_clk_mux_init(struct clknode *clk, device_t dev); static int rk_clk_mux_set_mux(struct clknode *clk, int idx); static int rk_clk_mux_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop); struct rk_clk_mux_sc { uint32_t offset; uint32_t shift; uint32_t mask; int mux_flags; struct syscon *grf; }; static clknode_method_t rk_clk_mux_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk_clk_mux_init), CLKNODEMETHOD(clknode_set_mux, rk_clk_mux_set_mux), CLKNODEMETHOD(clknode_set_freq, rk_clk_mux_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk_clk_mux, rk_clk_mux_class, rk_clk_mux_methods, sizeof(struct rk_clk_mux_sc), clknode_class); static struct syscon * rk_clk_mux_get_grf(struct clknode *clk) { device_t dev; phandle_t node; struct syscon *grf; grf = NULL; dev = clknode_get_device(clk); node = ofw_bus_get_node(dev); if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &grf) != 0) { return (NULL); } return (grf); } static int rk_clk_mux_init(struct clknode *clk, device_t dev) { uint32_t reg; struct rk_clk_mux_sc *sc; int rv; sc = clknode_get_softc(clk); if ((sc->mux_flags & RK_CLK_MUX_GRF) != 0) { sc->grf = rk_clk_mux_get_grf(clk); if (sc->grf == NULL) panic("clock %s has GRF flag set but no syscon is available", clknode_get_name(clk)); } DEVICE_LOCK(clk); if (sc->grf) { reg = SYSCON_READ_4(sc->grf, sc->offset); rv = 0; } else rv = RD4(clk, sc->offset, &reg); DEVICE_UNLOCK(clk); if (rv != 0) { return (rv); } reg = (reg >> sc->shift) & sc->mask; clknode_init_parent_idx(clk, reg); return(0); } static int rk_clk_mux_set_mux(struct clknode *clk, int idx) { uint32_t reg; struct rk_clk_mux_sc *sc; int rv; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); if (sc->grf) rv = SYSCON_MODIFY_4(sc->grf, sc->offset, sc->mask << sc->shift, ((idx & sc->mask) << sc->shift) | RK_CLK_MUX_MASK); else rv = MD4(clk, sc->offset, sc->mask << sc->shift, ((idx & sc->mask) << sc->shift) | 
RK_CLK_MUX_MASK); if (rv != 0) { DEVICE_UNLOCK(clk); return (rv); } if (sc->grf == NULL) RD4(clk, sc->offset, &reg); DEVICE_UNLOCK(clk); return(0); } static int rk_clk_mux_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_mux_sc *sc; struct clknode *p_clk, *p_best_clk; const char **p_names; int p_idx, best_parent; int rv; sc = clknode_get_softc(clk); if ((sc->mux_flags & RK_CLK_MUX_GRF) != 0) { *stop = 1; return (ENOTSUP); } if ((sc->mux_flags & RK_CLK_MUX_REPARENT) == 0) { *stop = 0; return (0); } dprintf("Finding best parent for target freq of %ju\n", *fout); p_names = clknode_get_parent_names(clk); for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) { p_clk = clknode_find_by_name(p_names[p_idx]); dprintf("Testing with parent %s (%d)\n", clknode_get_name(p_clk), p_idx); rv = clknode_set_freq(p_clk, *fout, flags | CLK_SET_DRYRUN, 0); dprintf("Testing with parent %s (%d) rv=%d\n", clknode_get_name(p_clk), p_idx, rv); if (rv == 0) { best_parent = p_idx; p_best_clk = p_clk; *stop = 1; } } if (!*stop) return (0); if ((flags & CLK_SET_DRYRUN) != 0) return (0); p_idx = clknode_get_parent_idx(clk); if (p_idx != best_parent) { dprintf("Switching parent index from %d to %d\n", p_idx, best_parent); clknode_set_parent_by_idx(clk, best_parent); } clknode_set_freq(p_best_clk, *fout, flags, 0); clknode_get_freq(p_best_clk, fout); return (0); } int rk_clk_mux_register(struct clkdom *clkdom, struct rk_clk_mux_def *clkdef) { struct clknode *clk; struct rk_clk_mux_sc *sc; clk = clknode_create(clkdom, &rk_clk_mux_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->mask = (1 << clkdef->width) - 1; sc->mux_flags = clkdef->mux_flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_mux.h b/sys/dev/clk/rockchip/rk_clk_mux.h index b32e0687fac3..c6c261294b68 100644 --- 
a/sys/dev/clk/rockchip/rk_clk_mux.h +++ b/sys/dev/clk/rockchip/rk_clk_mux.h @@ -1,47 +1,47 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _RK_CLK_MUX_H_ #define _RK_CLK_MUX_H_ -#include +#include struct rk_clk_mux_def { struct clknode_init_def clkdef; uint32_t offset; uint32_t shift; uint32_t width; int mux_flags; }; #define RK_CLK_MUX_MASK 0xFFFF0000 #define RK_CLK_MUX_REPARENT (1 << 0) #define RK_CLK_MUX_GRF (1 << 1) int rk_clk_mux_register(struct clkdom *clkdom, struct rk_clk_mux_def *clkdef); #endif /* _RK_CLK_MUX_H_ */ diff --git a/sys/dev/clk/rockchip/rk_clk_pll.c b/sys/dev/clk/rockchip/rk_clk_pll.c index f89fdeb0c341..8e6551ef6c29 100644 --- a/sys/dev/clk/rockchip/rk_clk_pll.c +++ b/sys/dev/clk/rockchip/rk_clk_pll.c @@ -1,774 +1,774 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include -#include +#include #include #include "clkdev_if.h" struct rk_clk_pll_sc { uint32_t base_offset; uint32_t gate_offset; uint32_t gate_shift; uint32_t mode_reg; uint32_t mode_shift; uint32_t flags; struct rk_clk_pll_rate *rates; struct rk_clk_pll_rate *frac_rates; }; #define WRITE4(_clk, off, val) \ CLKDEV_WRITE_4(clknode_get_device(_clk), off, val) #define READ4(_clk, off, val) \ CLKDEV_READ_4(clknode_get_device(_clk), off, val) #define DEVICE_LOCK(_clk) \ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk)) #define DEVICE_UNLOCK(_clk) \ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk)) #define RK_CLK_PLL_MASK_SHIFT 16 #if 0 #define dprintf(format, arg...) \ printf("%s:(%s)" format, __func__, clknode_get_name(clk), arg) #else #define dprintf(format, arg...) #endif static int rk_clk_pll_set_gate(struct clknode *clk, bool enable) { struct rk_clk_pll_sc *sc; uint32_t val = 0; sc = clknode_get_softc(clk); if ((sc->flags & RK_CLK_PLL_HAVE_GATE) == 0) return (0); dprintf("%sabling gate\n", enable ? 
"En" : "Dis"); if (!enable) val |= 1 << sc->gate_shift; dprintf("sc->gate_shift: %x\n", sc->gate_shift); val |= (1 << sc->gate_shift) << RK_CLK_PLL_MASK_SHIFT; dprintf("Write: gate_offset=%x, val=%x\n", sc->gate_offset, val); DEVICE_LOCK(clk); WRITE4(clk, sc->gate_offset, val); DEVICE_UNLOCK(clk); return (0); } /* CON0 */ #define RK3066_CLK_PLL_REFDIV_SHIFT 8 #define RK3066_CLK_PLL_REFDIV_MASK 0x3F00 #define RK3066_CLK_PLL_POSTDIV_SHIFT 0 #define RK3066_CLK_PLL_POSTDIV_MASK 0x000F /* CON1 */ #define RK3066_CLK_PLL_LOCK_MASK (1U << 31) #define RK3066_CLK_PLL_FBDIV_SHIFT 0 #define RK3066_CLK_PLL_FBDIV_MASK 0x0FFF /* CON2 */ /* CON3 */ #define RK3066_CLK_PLL_RESET (1 << 5) #define RK3066_CLK_PLL_TEST (1 << 4) #define RK3066_CLK_PLL_ENSAT (1 << 3) #define RK3066_CLK_PLL_FASTEN (1 << 2) #define RK3066_CLK_PLL_POWER_DOWN (1 << 1) #define RK3066_CLK_PLL_BYPASS (1 << 0) #define RK3066_CLK_PLL_MODE_SLOW 0 #define RK3066_CLK_PLL_MODE_NORMAL 1 #define RK3066_CLK_PLL_MODE_DEEP_SLOW 2 #define RK3066_CLK_PLL_MODE_MASK 0x3 static int rk3066_clk_pll_init(struct clknode *clk, device_t dev) { struct rk_clk_pll_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->mode_reg, &reg); DEVICE_UNLOCK(clk); reg = (reg >> sc->mode_shift) & RK3066_CLK_PLL_MODE_MASK; clknode_init_parent_idx(clk, reg); return (0); } static int rk3066_clk_pll_set_mux(struct clknode *clk, int idx) { uint32_t reg; struct rk_clk_pll_sc *sc; sc = clknode_get_softc(clk); reg = (idx & RK3066_CLK_PLL_MODE_MASK) << sc->mode_shift; reg |= (RK3066_CLK_PLL_MODE_MASK << sc->mode_shift) << RK_CLK_PLL_MASK_SHIFT; DEVICE_LOCK(clk); WRITE4(clk, sc->mode_reg, reg); DEVICE_UNLOCK(clk); return(0); } static int rk3066_clk_pll_recalc(struct clknode *clk, uint64_t *freq) { struct rk_clk_pll_sc *sc; uint64_t rate; uint32_t refdiv, fbdiv, postdiv; uint32_t raw0, raw1, raw2, reg; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->base_offset, &raw0); READ4(clk, sc->base_offset + 4, &raw1); 
READ4(clk, sc->base_offset + 8, &raw2); READ4(clk, sc->mode_reg, &reg); DEVICE_UNLOCK(clk); reg = (reg >> sc->mode_shift) & RK3066_CLK_PLL_MODE_MASK; if (reg != RK3066_CLK_PLL_MODE_NORMAL) return (0); if (!(raw1 & RK3066_CLK_PLL_LOCK_MASK)) { *freq = 0; return (0); } /* TODO MUX */ refdiv = (raw0 & RK3066_CLK_PLL_REFDIV_MASK) >> RK3066_CLK_PLL_REFDIV_SHIFT; refdiv += 1; postdiv = (raw0 & RK3066_CLK_PLL_POSTDIV_MASK) >> RK3066_CLK_PLL_POSTDIV_SHIFT; postdiv += 1; fbdiv = (raw1 & RK3066_CLK_PLL_FBDIV_MASK) >> RK3066_CLK_PLL_FBDIV_SHIFT; fbdiv += 1; rate = *freq * fbdiv; rate /= refdiv; *freq = rate / postdiv; return (0); } static int rk3066_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_pll_rate *rates; struct rk_clk_pll_sc *sc; uint32_t reg; int rv, timeout; sc = clknode_get_softc(clk); if (sc->rates == NULL) return (EINVAL); for (rates = sc->rates; rates->freq; rates++) { if (rates->freq == *fout) break; } if (rates->freq == 0) { *stop = 1; return (EINVAL); } DEVICE_LOCK(clk); /* Setting to slow mode during frequency change */ reg = (RK3066_CLK_PLL_MODE_MASK << sc->mode_shift) << RK_CLK_PLL_MASK_SHIFT; dprintf("Set PLL_MODEREG to %x\n", reg); WRITE4(clk, sc->mode_reg, reg); /* Reset PLL */ WRITE4(clk, sc->base_offset + 12, RK3066_CLK_PLL_RESET | RK3066_CLK_PLL_RESET << RK_CLK_PLL_MASK_SHIFT); /* Setting postdiv and refdiv */ reg = 0; reg |= RK3066_CLK_PLL_POSTDIV_MASK << 16; reg |= (rates->postdiv1 - 1) << RK3066_CLK_PLL_POSTDIV_SHIFT; reg |= RK3066_CLK_PLL_REFDIV_MASK << 16; reg |= (rates->refdiv - 1)<< RK3066_CLK_PLL_REFDIV_SHIFT; dprintf("Set PLL_CON0 to %x\n", reg); WRITE4(clk, sc->base_offset, reg); /* Setting fbdiv (no write mask)*/ READ4(clk, sc->base_offset + 4, &reg); reg &= ~RK3066_CLK_PLL_FBDIV_MASK; reg |= RK3066_CLK_PLL_FBDIV_MASK << 16; reg = (rates->fbdiv - 1) << RK3066_CLK_PLL_FBDIV_SHIFT; dprintf("Set PLL_CON1 to %x\n", reg); WRITE4(clk, sc->base_offset + 0x4, reg); /* PLL loop bandwidth 
adjust */ reg = rates->bwadj - 1; dprintf("Set PLL_CON2 to %x (%x)\n", reg, rates->bwadj); WRITE4(clk, sc->base_offset + 0x8, reg); /* Clear reset */ WRITE4(clk, sc->base_offset + 12, RK3066_CLK_PLL_RESET << RK_CLK_PLL_MASK_SHIFT); DELAY(100000); /* Reading lock */ for (timeout = 1000; timeout >= 0; timeout--) { READ4(clk, sc->base_offset + 0x4, &reg); if ((reg & RK3066_CLK_PLL_LOCK_MASK) != 0) break; DELAY(1); } rv = 0; if (timeout < 0) { device_printf(clknode_get_device(clk), "%s - Timedout while waiting for lock.\n", clknode_get_name(clk)); dprintf("PLL_CON1: %x\n", reg); rv = ETIMEDOUT; } /* Set back to normal mode */ reg = (RK3066_CLK_PLL_MODE_NORMAL << sc->mode_shift); reg |= (RK3066_CLK_PLL_MODE_MASK << sc->mode_shift) << RK_CLK_PLL_MASK_SHIFT; dprintf("Set PLL_MODEREG to %x\n", reg); WRITE4(clk, sc->mode_reg, reg); DEVICE_UNLOCK(clk); *stop = 1; rv = clknode_set_parent_by_idx(clk, 1); return (rv); } static clknode_method_t rk3066_clk_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk3066_clk_pll_init), CLKNODEMETHOD(clknode_set_gate, rk_clk_pll_set_gate), CLKNODEMETHOD(clknode_recalc_freq, rk3066_clk_pll_recalc), CLKNODEMETHOD(clknode_set_freq, rk3066_clk_pll_set_freq), CLKNODEMETHOD(clknode_set_mux, rk3066_clk_pll_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk3066_clk_pll_clknode, rk3066_clk_pll_clknode_class, rk3066_clk_pll_clknode_methods, sizeof(struct rk_clk_pll_sc), clknode_class); int rk3066_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef) { struct clknode *clk; struct rk_clk_pll_sc *sc; clk = clknode_create(clkdom, &rk3066_clk_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->base_offset = clkdef->base_offset; sc->gate_offset = clkdef->gate_offset; sc->gate_shift = clkdef->gate_shift; sc->mode_reg = clkdef->mode_reg; sc->mode_shift = clkdef->mode_shift; sc->flags = clkdef->flags; sc->rates = clkdef->rates; sc->frac_rates = clkdef->frac_rates; 
clknode_register(clkdom, clk); return (0); } #define RK3328_CLK_PLL_FBDIV_OFFSET 0 #define RK3328_CLK_PLL_FBDIV_SHIFT 0 #define RK3328_CLK_PLL_FBDIV_MASK 0xFFF #define RK3328_CLK_PLL_POSTDIV1_OFFSET 0 #define RK3328_CLK_PLL_POSTDIV1_SHIFT 12 #define RK3328_CLK_PLL_POSTDIV1_MASK 0x7000 #define RK3328_CLK_PLL_DSMPD_OFFSET 4 #define RK3328_CLK_PLL_DSMPD_SHIFT 12 #define RK3328_CLK_PLL_DSMPD_MASK 0x1000 #define RK3328_CLK_PLL_REFDIV_OFFSET 4 #define RK3328_CLK_PLL_REFDIV_SHIFT 0 #define RK3328_CLK_PLL_REFDIV_MASK 0x3F #define RK3328_CLK_PLL_POSTDIV2_OFFSET 4 #define RK3328_CLK_PLL_POSTDIV2_SHIFT 6 #define RK3328_CLK_PLL_POSTDIV2_MASK 0x1C0 #define RK3328_CLK_PLL_FRAC_OFFSET 8 #define RK3328_CLK_PLL_FRAC_SHIFT 0 #define RK3328_CLK_PLL_FRAC_MASK 0xFFFFFF #define RK3328_CLK_PLL_LOCK_MASK 0x400 #define RK3328_CLK_PLL_MODE_SLOW 0 #define RK3328_CLK_PLL_MODE_NORMAL 1 #define RK3328_CLK_PLL_MODE_MASK 0x1 static int rk3328_clk_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int rk3328_clk_pll_recalc(struct clknode *clk, uint64_t *freq) { struct rk_clk_pll_sc *sc; uint64_t rate; uint32_t dsmpd, refdiv, fbdiv; uint32_t postdiv1, postdiv2, frac; uint32_t raw1, raw2, raw3; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->base_offset, &raw1); READ4(clk, sc->base_offset + 4, &raw2); READ4(clk, sc->base_offset + 8, &raw3); fbdiv = (raw1 & RK3328_CLK_PLL_FBDIV_MASK) >> RK3328_CLK_PLL_FBDIV_SHIFT; postdiv1 = (raw1 & RK3328_CLK_PLL_POSTDIV1_MASK) >> RK3328_CLK_PLL_POSTDIV1_SHIFT; dsmpd = (raw2 & RK3328_CLK_PLL_DSMPD_MASK) >> RK3328_CLK_PLL_DSMPD_SHIFT; refdiv = (raw2 & RK3328_CLK_PLL_REFDIV_MASK) >> RK3328_CLK_PLL_REFDIV_SHIFT; postdiv2 = (raw2 & RK3328_CLK_PLL_POSTDIV2_MASK) >> RK3328_CLK_PLL_POSTDIV2_SHIFT; frac = (raw3 & RK3328_CLK_PLL_FRAC_MASK) >> RK3328_CLK_PLL_FRAC_SHIFT; DEVICE_UNLOCK(clk); rate = *freq * fbdiv / refdiv; if (dsmpd == 0) { /* Fractional mode */ uint64_t frac_rate; frac_rate = *freq * frac / refdiv; 
rate += frac_rate >> 24; } *freq = rate / postdiv1 / postdiv2; if (*freq % 2) *freq = *freq + 1; return (0); } static int rk3328_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_pll_rate *rates; struct rk_clk_pll_sc *sc; uint32_t reg; int timeout; sc = clknode_get_softc(clk); if (sc->rates) rates = sc->rates; else if (sc->frac_rates) rates = sc->frac_rates; else return (EINVAL); for (; rates->freq; rates++) { if (rates->freq == *fout) break; } if (rates->freq == 0) { *stop = 1; return (EINVAL); } DEVICE_LOCK(clk); /* Setting to slow mode during frequency change */ reg = (RK3328_CLK_PLL_MODE_MASK << sc->mode_shift) << RK_CLK_PLL_MASK_SHIFT; dprintf("Set PLL_MODEREG to %x\n", reg); WRITE4(clk, sc->mode_reg, reg); /* Setting postdiv1 and fbdiv */ reg = (rates->postdiv1 << RK3328_CLK_PLL_POSTDIV1_SHIFT) | (rates->fbdiv << RK3328_CLK_PLL_FBDIV_SHIFT); reg |= (RK3328_CLK_PLL_POSTDIV1_MASK | RK3328_CLK_PLL_FBDIV_MASK) << 16; dprintf("Set PLL_CON0 to %x\n", reg); WRITE4(clk, sc->base_offset, reg); /* Setting dsmpd, postdiv2 and refdiv */ reg = (rates->dsmpd << RK3328_CLK_PLL_DSMPD_SHIFT) | (rates->postdiv2 << RK3328_CLK_PLL_POSTDIV2_SHIFT) | (rates->refdiv << RK3328_CLK_PLL_REFDIV_SHIFT); reg |= (RK3328_CLK_PLL_DSMPD_MASK | RK3328_CLK_PLL_POSTDIV2_MASK | RK3328_CLK_PLL_REFDIV_MASK) << RK_CLK_PLL_MASK_SHIFT; dprintf("Set PLL_CON1 to %x\n", reg); WRITE4(clk, sc->base_offset + 0x4, reg); /* Setting frac */ READ4(clk, sc->base_offset + 0x8, &reg); reg &= ~RK3328_CLK_PLL_FRAC_MASK; reg |= rates->frac << RK3328_CLK_PLL_FRAC_SHIFT; dprintf("Set PLL_CON2 to %x\n", reg); WRITE4(clk, sc->base_offset + 0x8, reg); /* Reading lock */ for (timeout = 1000; timeout; timeout--) { READ4(clk, sc->base_offset + 0x4, &reg); if ((reg & RK3328_CLK_PLL_LOCK_MASK) == 0) break; DELAY(1); } /* Set back to normal mode */ reg = (RK3328_CLK_PLL_MODE_NORMAL << sc->mode_shift); reg |= (RK3328_CLK_PLL_MODE_MASK << sc->mode_shift) << 
RK_CLK_PLL_MASK_SHIFT; dprintf("Set PLL_MODEREG to %x\n", reg); WRITE4(clk, sc->mode_reg, reg); DEVICE_UNLOCK(clk); *stop = 1; return (0); } static clknode_method_t rk3328_clk_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk3328_clk_pll_init), CLKNODEMETHOD(clknode_set_gate, rk_clk_pll_set_gate), CLKNODEMETHOD(clknode_recalc_freq, rk3328_clk_pll_recalc), CLKNODEMETHOD(clknode_set_freq, rk3328_clk_pll_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk3328_clk_pll_clknode, rk3328_clk_pll_clknode_class, rk3328_clk_pll_clknode_methods, sizeof(struct rk_clk_pll_sc), clknode_class); int rk3328_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef) { struct clknode *clk; struct rk_clk_pll_sc *sc; clk = clknode_create(clkdom, &rk3328_clk_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->base_offset = clkdef->base_offset; sc->gate_offset = clkdef->gate_offset; sc->gate_shift = clkdef->gate_shift; sc->mode_reg = clkdef->mode_reg; sc->mode_shift = clkdef->mode_shift; sc->flags = clkdef->flags; sc->rates = clkdef->rates; sc->frac_rates = clkdef->frac_rates; clknode_register(clkdom, clk); return (0); } #define RK3399_CLK_PLL_FBDIV_OFFSET 0 #define RK3399_CLK_PLL_FBDIV_SHIFT 0 #define RK3399_CLK_PLL_FBDIV_MASK 0xFFF #define RK3399_CLK_PLL_POSTDIV2_OFFSET 4 #define RK3399_CLK_PLL_POSTDIV2_SHIFT 12 #define RK3399_CLK_PLL_POSTDIV2_MASK 0x7000 #define RK3399_CLK_PLL_POSTDIV1_OFFSET 4 #define RK3399_CLK_PLL_POSTDIV1_SHIFT 8 #define RK3399_CLK_PLL_POSTDIV1_MASK 0x700 #define RK3399_CLK_PLL_REFDIV_OFFSET 4 #define RK3399_CLK_PLL_REFDIV_SHIFT 0 #define RK3399_CLK_PLL_REFDIV_MASK 0x3F #define RK3399_CLK_PLL_FRAC_OFFSET 8 #define RK3399_CLK_PLL_FRAC_SHIFT 0 #define RK3399_CLK_PLL_FRAC_MASK 0xFFFFFF #define RK3399_CLK_PLL_DSMPD_OFFSET 0xC #define RK3399_CLK_PLL_DSMPD_SHIFT 3 #define RK3399_CLK_PLL_DSMPD_MASK 0x8 #define RK3399_CLK_PLL_LOCK_OFFSET 8 #define RK3399_CLK_PLL_LOCK_MASK 0x400 #define 
RK3399_CLK_PLL_MODE_OFFSET 0xC #define RK3399_CLK_PLL_MODE_MASK 0x300 #define RK3399_CLK_PLL_MODE_SLOW 0 #define RK3399_CLK_PLL_MODE_NORMAL 1 #define RK3399_CLK_PLL_MODE_DEEPSLOW 2 #define RK3399_CLK_PLL_MODE_SHIFT 8 #define RK3399_CLK_PLL_WRITE_MASK 0xFFFF0000 static int rk3399_clk_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int rk3399_clk_pll_recalc(struct clknode *clk, uint64_t *freq) { struct rk_clk_pll_sc *sc; uint32_t dsmpd, refdiv, fbdiv; uint32_t postdiv1, postdiv2, fracdiv; uint32_t con1, con2, con3, con4; uint64_t foutvco; uint32_t mode; sc = clknode_get_softc(clk); DEVICE_LOCK(clk); READ4(clk, sc->base_offset, &con1); READ4(clk, sc->base_offset + 4, &con2); READ4(clk, sc->base_offset + 8, &con3); READ4(clk, sc->base_offset + 0xC, &con4); DEVICE_UNLOCK(clk); /* * if we are in slow mode the output freq * is the parent one, the 24Mhz external oscillator * if we are in deep mode the output freq is 32.768khz */ mode = (con4 & RK3399_CLK_PLL_MODE_MASK) >> RK3399_CLK_PLL_MODE_SHIFT; if (mode == RK3399_CLK_PLL_MODE_SLOW) { dprintf("pll in slow mode, con4=%x\n", con4); return (0); } else if (mode == RK3399_CLK_PLL_MODE_DEEPSLOW) { dprintf("pll in deep slow, con4=%x\n", con4); *freq = 32768; return (0); } dprintf("con0: %x\n", con1); dprintf("con1: %x\n", con2); dprintf("con2: %x\n", con3); dprintf("con3: %x\n", con4); fbdiv = (con1 & RK3399_CLK_PLL_FBDIV_MASK) >> RK3399_CLK_PLL_FBDIV_SHIFT; postdiv1 = (con2 & RK3399_CLK_PLL_POSTDIV1_MASK) >> RK3399_CLK_PLL_POSTDIV1_SHIFT; postdiv2 = (con2 & RK3399_CLK_PLL_POSTDIV2_MASK) >> RK3399_CLK_PLL_POSTDIV2_SHIFT; refdiv = (con2 & RK3399_CLK_PLL_REFDIV_MASK) >> RK3399_CLK_PLL_REFDIV_SHIFT; fracdiv = (con3 & RK3399_CLK_PLL_FRAC_MASK) >> RK3399_CLK_PLL_FRAC_SHIFT; fracdiv >>= 24; dsmpd = (con4 & RK3399_CLK_PLL_DSMPD_MASK) >> RK3399_CLK_PLL_DSMPD_SHIFT; dprintf("fbdiv: %d\n", fbdiv); dprintf("postdiv1: %d\n", postdiv1); dprintf("postdiv2: %d\n", postdiv2); 
dprintf("refdiv: %d\n", refdiv); dprintf("fracdiv: %d\n", fracdiv); dprintf("dsmpd: %d\n", dsmpd); dprintf("parent freq=%ju\n", *freq); if (dsmpd == 0) { /* Fractional mode */ foutvco = *freq / refdiv * (fbdiv + fracdiv); } else { /* Integer mode */ foutvco = *freq / refdiv * fbdiv; } dprintf("foutvco: %ju\n", foutvco); *freq = foutvco / postdiv1 / postdiv2; dprintf("freq: %ju\n", *freq); return (0); } static int rk3399_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct rk_clk_pll_rate *rates; struct rk_clk_pll_sc *sc; uint32_t reg; int timeout; sc = clknode_get_softc(clk); if (sc->rates) rates = sc->rates; else if (sc->frac_rates) rates = sc->frac_rates; else return (EINVAL); for (; rates->freq; rates++) { if (rates->freq == *fout) break; } if (rates->freq == 0) { *stop = 1; return (EINVAL); } DEVICE_LOCK(clk); /* Set to slow mode during frequency change */ reg = RK3399_CLK_PLL_MODE_SLOW << RK3399_CLK_PLL_MODE_SHIFT; reg |= RK3399_CLK_PLL_MODE_MASK << RK_CLK_PLL_MASK_SHIFT; WRITE4(clk, sc->base_offset + 0xC, reg); /* Setting fbdiv */ reg = rates->fbdiv << RK3399_CLK_PLL_FBDIV_SHIFT; reg |= RK3399_CLK_PLL_FBDIV_MASK << RK_CLK_PLL_MASK_SHIFT; WRITE4(clk, sc->base_offset, reg); /* Setting postdiv1, postdiv2 and refdiv */ reg = rates->postdiv1 << RK3399_CLK_PLL_POSTDIV1_SHIFT; reg |= rates->postdiv2 << RK3399_CLK_PLL_POSTDIV2_SHIFT; reg |= rates->refdiv << RK3399_CLK_PLL_REFDIV_SHIFT; reg |= (RK3399_CLK_PLL_POSTDIV1_MASK | RK3399_CLK_PLL_POSTDIV2_MASK | RK3399_CLK_PLL_REFDIV_MASK) << RK_CLK_PLL_MASK_SHIFT; WRITE4(clk, sc->base_offset + 0x4, reg); /* Setting frac */ READ4(clk, sc->base_offset + 0x8, &reg); reg &= ~RK3399_CLK_PLL_FRAC_MASK; reg |= rates->frac << RK3399_CLK_PLL_FRAC_SHIFT; WRITE4(clk, sc->base_offset + 0x8, reg | RK3399_CLK_PLL_WRITE_MASK); /* Set dsmpd */ reg = rates->dsmpd << RK3399_CLK_PLL_DSMPD_SHIFT; reg |= RK3399_CLK_PLL_DSMPD_MASK << RK_CLK_PLL_MASK_SHIFT; WRITE4(clk, sc->base_offset + 0xC, reg); /* 
Reading lock */ for (timeout = 1000; timeout; timeout--) { READ4(clk, sc->base_offset + RK3399_CLK_PLL_LOCK_OFFSET, &reg); if ((reg & RK3399_CLK_PLL_LOCK_MASK) == 0) break; DELAY(1); } /* Set back to normal mode */ reg = RK3399_CLK_PLL_MODE_NORMAL << RK3399_CLK_PLL_MODE_SHIFT; reg |= RK3399_CLK_PLL_MODE_MASK << RK_CLK_PLL_MASK_SHIFT; WRITE4(clk, sc->base_offset + 0xC, reg); DEVICE_UNLOCK(clk); *stop = 1; return (0); } static clknode_method_t rk3399_clk_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, rk3399_clk_pll_init), CLKNODEMETHOD(clknode_set_gate, rk_clk_pll_set_gate), CLKNODEMETHOD(clknode_recalc_freq, rk3399_clk_pll_recalc), CLKNODEMETHOD(clknode_set_freq, rk3399_clk_pll_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk3399_clk_pll_clknode, rk3399_clk_pll_clknode_class, rk3399_clk_pll_clknode_methods, sizeof(struct rk_clk_pll_sc), clknode_class); int rk3399_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef) { struct clknode *clk; struct rk_clk_pll_sc *sc; clk = clknode_create(clkdom, &rk3399_clk_pll_clknode_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->base_offset = clkdef->base_offset; sc->gate_offset = clkdef->gate_offset; sc->gate_shift = clkdef->gate_shift; sc->flags = clkdef->flags; sc->rates = clkdef->rates; sc->frac_rates = clkdef->frac_rates; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/rockchip/rk_clk_pll.h b/sys/dev/clk/rockchip/rk_clk_pll.h index 2d0c0bb67fdf..28d795b4e5b8 100644 --- a/sys/dev/clk/rockchip/rk_clk_pll.h +++ b/sys/dev/clk/rockchip/rk_clk_pll.h @@ -1,66 +1,66 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _RK_CLK_PLL_H_ #define _RK_CLK_PLL_H_ -#include +#include struct rk_clk_pll_rate { uint32_t freq; uint32_t refdiv; uint32_t fbdiv; uint32_t postdiv1; uint32_t postdiv2; uint32_t dsmpd; uint32_t frac; uint32_t bwadj; }; struct rk_clk_pll_def { struct clknode_init_def clkdef; uint32_t base_offset; uint32_t gate_offset; uint32_t gate_shift; uint32_t mode_reg; uint32_t mode_shift; uint32_t flags; struct rk_clk_pll_rate *rates; struct rk_clk_pll_rate *frac_rates; }; #define RK_CLK_PLL_HAVE_GATE 0x1 int rk3066_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef); int rk3328_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef); int rk3399_clk_pll_register(struct clkdom *clkdom, struct rk_clk_pll_def *clkdef); #endif /* _RK_CLK_PLL_H_ */ diff --git a/sys/dev/clk/rockchip/rk_cru.c b/sys/dev/clk/rockchip/rk_cru.c index 81760bcd74a1..a7d1b26166f1 100644 --- a/sys/dev/clk/rockchip/rk_cru.c +++ b/sys/dev/clk/rockchip/rk_cru.c @@ -1,305 +1,305 @@ /*- * SPDX-License-Identifier: 
BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * RockChip Clock and Reset Unit */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include #include #include "clkdev_if.h" #include "hwreset_if.h" static struct resource_spec rk_cru_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; #define CCU_READ4(sc, reg) bus_read_4((sc)->res, (reg)) #define CCU_WRITE4(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) void rk3328_cru_register_clocks(struct rk_cru_softc *sc); static int rk_cru_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct rk_cru_softc *sc; sc = device_get_softc(dev); CCU_WRITE4(sc, addr, val); return (0); } static int rk_cru_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct rk_cru_softc *sc; sc = device_get_softc(dev); *val = CCU_READ4(sc, addr); return (0); } static int rk_cru_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct rk_cru_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = CCU_READ4(sc, addr); reg &= ~clr; reg |= set; CCU_WRITE4(sc, addr, reg); return (0); } static int rk_cru_reset_assert(device_t dev, intptr_t id, bool reset) { struct rk_cru_softc *sc; uint32_t reg; int bit; uint32_t val; sc = device_get_softc(dev); if (id > sc->reset_num) return (ENXIO); reg = sc->reset_offset + id / 16 * 4; bit = id % 16; mtx_lock(&sc->mtx); val = 0; if (reset) val = (1 << bit); CCU_WRITE4(sc, reg, val | ((1 << bit) << 16)); mtx_unlock(&sc->mtx); return (0); } static int rk_cru_reset_is_asserted(device_t dev, intptr_t id, bool *reset) { struct rk_cru_softc *sc; uint32_t reg; int bit; uint32_t val; sc = device_get_softc(dev); if (id > sc->reset_num) return (ENXIO); reg = sc->reset_offset + id / 16 * 4; bit = id % 16; mtx_lock(&sc->mtx); val = CCU_READ4(sc, reg); mtx_unlock(&sc->mtx); *reset = false; if (val & (1 << bit)) *reset = true; return (0); } static void 
rk_cru_device_lock(device_t dev) { struct rk_cru_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void rk_cru_device_unlock(device_t dev) { struct rk_cru_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static int rk_cru_register_gates(struct rk_cru_softc *sc) { struct rk_clk_gate_def def; int i; for (i = 0; i < sc->ngates; i++) { if (sc->gates[i].name == NULL) continue; memset(&def, 0, sizeof(def)); def.clkdef.id = sc->gates[i].id; def.clkdef.name = sc->gates[i].name; def.clkdef.parent_names = &sc->gates[i].parent_name; def.clkdef.parent_cnt = 1; def.offset = sc->gates[i].offset; def.shift = sc->gates[i].shift; def.mask = 1; def.on_value = 0; def.off_value = 1; rk_clk_gate_register(sc->clkdom, &def); } return (0); } int rk_cru_attach(device_t dev) { struct rk_cru_softc *sc; phandle_t node; int i; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (bus_alloc_resources(dev, rk_cru_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) panic("Cannot create clkdom\n"); for (i = 0; i < sc->nclks; i++) { switch (sc->clks[i].type) { case RK_CLK_UNDEFINED: break; case RK3066_CLK_PLL: rk3066_clk_pll_register(sc->clkdom, sc->clks[i].clk.pll); break; case RK3328_CLK_PLL: rk3328_clk_pll_register(sc->clkdom, sc->clks[i].clk.pll); break; case RK3399_CLK_PLL: rk3399_clk_pll_register(sc->clkdom, sc->clks[i].clk.pll); break; case RK_CLK_COMPOSITE: rk_clk_composite_register(sc->clkdom, sc->clks[i].clk.composite); break; case RK_CLK_MUX: rk_clk_mux_register(sc->clkdom, sc->clks[i].clk.mux); break; case RK_CLK_ARMCLK: rk_clk_armclk_register(sc->clkdom, sc->clks[i].clk.armclk); break; case RK_CLK_FIXED: clknode_fixed_register(sc->clkdom, sc->clks[i].clk.fixed); break; case RK_CLK_FRACT: rk_clk_fract_register(sc->clkdom, sc->clks[i].clk.fract); break; case 
RK_CLK_LINK: clknode_link_register(sc->clkdom, sc->clks[i].clk.link); break; default: device_printf(dev, "Unknown clock type\n"); return (ENXIO); } } if (sc->gates) rk_cru_register_gates(sc); if (clkdom_finit(sc->clkdom) != 0) panic("cannot finalize clkdom initialization\n"); if (bootverbose) clkdom_dump(sc->clkdom); clk_set_assigned(dev, node); /* register our self as a reset provider */ hwreset_register_ofw_provider(dev); return (0); } static device_method_t rk_cru_methods[] = { /* clkdev interface */ DEVMETHOD(clkdev_write_4, rk_cru_write_4), DEVMETHOD(clkdev_read_4, rk_cru_read_4), DEVMETHOD(clkdev_modify_4, rk_cru_modify_4), DEVMETHOD(clkdev_device_lock, rk_cru_device_lock), DEVMETHOD(clkdev_device_unlock, rk_cru_device_unlock), /* Reset interface */ DEVMETHOD(hwreset_assert, rk_cru_reset_assert), DEVMETHOD(hwreset_is_asserted, rk_cru_reset_is_asserted), DEVMETHOD_END }; DEFINE_CLASS_0(rk_cru, rk_cru_driver, rk_cru_methods, sizeof(struct rk_cru_softc)); diff --git a/sys/dev/clk/rockchip/rk_cru.h b/sys/dev/clk/rockchip/rk_cru.h index 3249c8c8f13c..685c6bb67568 100644 --- a/sys/dev/clk/rockchip/rk_cru.h +++ b/sys/dev/clk/rockchip/rk_cru.h @@ -1,266 +1,266 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __RK_CRU_H__ #define __RK_CRU_H__ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include #include #include #include /* Macro for defining various types of clocks. */ /* Parent list */ #define PLIST(_name) static const char *_name[] /* Pure gate */ #define GATE(_idx, _clkname, _pname, _o, _s) \ { \ .id = _idx, \ .name = _clkname, \ .parent_name = _pname, \ .offset = CRU_CLKGATE_CON(_o), \ .shift = _s, \ } /* Fixed rate clock. */ #define FRATE(_id, _name, _freq) \ { \ .type = RK_CLK_FIXED, \ .clk.fixed = &(struct clk_fixed_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .freq = _freq, \ }, \ } /* Fixed factor multipier/divider. */ #define FFACT(_id, _name, _pname, _mult, _div) \ { \ .type = RK_CLK_FIXED, \ .clk.fixed = &(struct clk_fixed_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .mult = _mult, \ .div = _div, \ }, \ } /* Linked clock. */ #define LINK(_name) \ { \ .type = RK_CLK_LINK, \ .clk.link = &(struct clk_link_def) { \ .clkdef.id = 0, \ .clkdef.name = _name, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ }, \ } /* Complex clock fo ARM cores. 
*/ #define ARMDIV(_id, _name, _pn, _r, _o, _ds, _dw, _ms, _mw, _mp, _ap) \ { \ .type = RK_CLK_ARMCLK, \ .clk.armclk = &(struct rk_clk_armclk_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .muxdiv_offset = CRU_CLKSEL_CON(_o), \ .mux_shift = _ms, \ .mux_width = _mw, \ .div_shift = _ds, \ .div_width = _dw, \ .main_parent = _mp, \ .alt_parent = _ap, \ .rates = _r, \ .nrates = nitems(_r), \ }, \ } /* Fractional rate multipier/divider. */ #define FRACT(_id, _name, _pname, _f, _o) \ { \ .type = RK_CLK_FRACT, \ .clk.fract = &(struct rk_clk_fract_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = CRU_CLKSEL_CON(_o), \ .flags = _f, \ }, \ } /* Full composite clock. */ #define COMP(_id, _name, _pnames, _f, _o, _ds, _dw, _ms, _mw) \ { \ .type = RK_CLK_COMPOSITE, \ .clk.composite = &(struct rk_clk_composite_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pnames, \ .clkdef.parent_cnt = nitems(_pnames), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .muxdiv_offset = CRU_CLKSEL_CON(_o), \ .mux_shift = _ms, \ .mux_width = _mw, \ .div_shift = _ds, \ .div_width = _dw, \ .flags = RK_CLK_COMPOSITE_HAVE_MUX | _f, \ }, \ } /* Composite clock without mux (divider only). */ #define CDIV(_id, _name, _pname, _f, _o, _ds, _dw) \ { \ .type = RK_CLK_COMPOSITE, \ .clk.composite = &(struct rk_clk_composite_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = (const char *[]){_pname}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .muxdiv_offset = CRU_CLKSEL_CON(_o), \ .div_shift = _ds, \ .div_width = _dw, \ .flags = _f, \ }, \ } /* Complex clock without divider (multiplexer only). 
*/ #define MUXRAW(_id, _name, _pn, _f, _mo, _ms, _mw) \ { \ .type = RK_CLK_MUX, \ .clk.mux = &(struct rk_clk_mux_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _mo, \ .shift = _ms, \ .width = _mw, \ .mux_flags = _f, \ }, \ } #define MUX(_id, _name, _pn, _f, _mo, _ms, _mw) \ MUXRAW(_id, _name, _pn, _f, CRU_CLKSEL_CON(_mo), _ms, _mw) /* Complex clock without divider (multiplexer only in GRF). */ #define MUXGRF(_id, _name, _pn, _f, _mo, _ms, _mw) \ { \ .type = RK_CLK_MUX, \ .clk.mux = &(struct rk_clk_mux_def) { \ .clkdef.id = _id, \ .clkdef.name = _name, \ .clkdef.parent_names = _pn, \ .clkdef.parent_cnt = nitems(_pn), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _mo, \ .shift = _ms, \ .width = _mw, \ .mux_flags = RK_CLK_MUX_GRF | _f, \ }, \ } struct rk_cru_gate { const char *name; const char *parent_name; uint32_t id; uint32_t offset; uint32_t shift; }; enum rk_clk_type { RK_CLK_UNDEFINED = 0, RK3066_CLK_PLL, RK3328_CLK_PLL, RK3399_CLK_PLL, RK_CLK_COMPOSITE, RK_CLK_FIXED, RK_CLK_FRACT, RK_CLK_MUX, RK_CLK_ARMCLK, RK_CLK_LINK, }; struct rk_clk { enum rk_clk_type type; union { struct rk_clk_pll_def *pll; struct rk_clk_composite_def *composite; struct rk_clk_mux_def *mux; struct rk_clk_armclk_def *armclk; struct clk_fixed_def *fixed; struct rk_clk_fract_def *fract; struct clk_link_def *link; } clk; }; struct rk_cru_softc { device_t dev; struct resource *res; struct clkdom *clkdom; struct mtx mtx; int type; uint32_t reset_offset; uint32_t reset_num; struct rk_cru_gate *gates; int ngates; struct rk_clk *clks; int nclks; struct rk_clk_armclk_def *armclk; struct rk_clk_armclk_rates *armclk_rates; int narmclk_rates; }; DECLARE_CLASS(rk_cru_driver); int rk_cru_attach(device_t dev); #endif /* __RK_CRU_H__ */ diff --git a/sys/dev/clk/xilinx/zynqmp_clk_div.c b/sys/dev/clk/xilinx/zynqmp_clk_div.c index cc6e4d73e92d..ce754b10fb55 100644 --- 
a/sys/dev/clk/xilinx/zynqmp_clk_div.c +++ b/sys/dev/clk/xilinx/zynqmp_clk_div.c @@ -1,140 +1,140 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" #define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d)) struct zynqmp_clk_div_softc { device_t firmware; enum zynqmp_clk_div_type type; uint32_t id; }; static int zynqmp_clk_div_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int zynqmp_clk_div_recalc(struct clknode *clk, uint64_t *freq) { struct zynqmp_clk_div_softc *sc; uint32_t div; int rv; sc = clknode_get_softc(clk); rv = ZYNQMP_FIRMWARE_CLOCK_GETDIVIDER(sc->firmware, sc->id, &div); if (rv != 0) { printf("%s: Error while getting divider for %s\n", __func__, clknode_get_name(clk)); return (EINVAL); } if (sc->type == CLK_DIV_TYPE_DIV0) div &= 0xFFFF; else div = div >> 16; *freq = howmany((unsigned long long)*freq, div + 1); return (0); } static int zynqmp_clk_div_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { struct zynqmp_clk_div_softc *sc; uint32_t div; int rv; sc = clknode_get_softc(clk); div = DIV_ROUND_CLOSEST(fparent, *fout); if (sc->type == CLK_DIV_TYPE_DIV0) { div &= 0xFFFF; div |= 0xFFFF << 16; } else { div <<= 16; div |= 0xFFFF; } rv = ZYNQMP_FIRMWARE_CLOCK_SETDIVIDER(sc->firmware, sc->id, div); if (rv != 0) { printf("%s: Error while setting divider for %s\n", __func__, clknode_get_name(clk)); return (EINVAL); } return (rv); } static clknode_method_t zynqmp_clk_div_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_div_init), CLKNODEMETHOD(clknode_recalc_freq, zynqmp_clk_div_recalc), CLKNODEMETHOD(clknode_set_freq, zynqmp_clk_div_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_div_clknode, zynqmp_clk_div_clknode_class, zynqmp_clk_div_clknode_methods, sizeof(struct zynqmp_clk_div_softc), clknode_class); int zynqmp_clk_div_register(struct clkdom *clkdom, device_t fw, struct clknode_init_def *clkdef, enum zynqmp_clk_div_type type) { struct clknode *clk; 
struct zynqmp_clk_div_softc *sc; uint32_t fw_clk_id; fw_clk_id = clkdef->id - 1; clkdef->id = 0; clk = clknode_create(clkdom, &zynqmp_clk_div_clknode_class, clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->id = fw_clk_id; sc->firmware = fw; sc->type = type; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/xilinx/zynqmp_clk_fixed.c b/sys/dev/clk/xilinx/zynqmp_clk_fixed.c index 94de0491acb6..7a8016593c8f 100644 --- a/sys/dev/clk/xilinx/zynqmp_clk_fixed.c +++ b/sys/dev/clk/xilinx/zynqmp_clk_fixed.c @@ -1,101 +1,101 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" struct zynqmp_clk_fixed_softc { device_t firmware; uint32_t id; }; static int zynqmp_clk_fixed_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int zynqmp_clk_fixed_recalc(struct clknode *clk, uint64_t *freq) { struct zynqmp_clk_fixed_softc *sc; uint32_t mult, div; int rv; sc = clknode_get_softc(clk); rv = ZYNQMP_FIRMWARE_CLOCK_GET_FIXEDFACTOR(sc->firmware, sc->id, &mult, &div); if (rv != 0) { printf("%s: Error while getting fixed factor for %s\n", __func__, clknode_get_name(clk)); return (EINVAL); } *freq = (*freq * mult) / div; return (0); } static clknode_method_t zynqmp_clk_fixed_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_fixed_init), CLKNODEMETHOD(clknode_recalc_freq, zynqmp_clk_fixed_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_fixed_clknode, zynqmp_clk_fixed_clknode_class, zynqmp_clk_fixed_clknode_methods, sizeof(struct zynqmp_clk_fixed_softc), clknode_class); int zynqmp_clk_fixed_register(struct clkdom *clkdom, device_t fw, struct clknode_init_def *clkdef) { struct clknode *clk; struct zynqmp_clk_fixed_softc *sc; uint32_t fw_clk_id; fw_clk_id = clkdef->id - 1; clkdef->id = 0; clk = clknode_create(clkdom, &zynqmp_clk_fixed_clknode_class, clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->id = fw_clk_id; sc->firmware = fw; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/xilinx/zynqmp_clk_gate.c b/sys/dev/clk/xilinx/zynqmp_clk_gate.c index 180c7db38c09..8f3f7de1c151 100644 --- a/sys/dev/clk/xilinx/zynqmp_clk_gate.c +++ b/sys/dev/clk/xilinx/zynqmp_clk_gate.c @@ -1,102 +1,102 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. 
KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" struct zynqmp_clk_gate_softc { device_t firmware; uint32_t id; }; static int zynqmp_clk_gate_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int zynqmp_clk_set_gate(struct clknode *clk, bool enable) { struct zynqmp_clk_gate_softc *sc; int rv; sc = clknode_get_softc(clk); if (enable) rv = ZYNQMP_FIRMWARE_CLOCK_ENABLE(sc->firmware, sc->id); else rv = ZYNQMP_FIRMWARE_CLOCK_DISABLE(sc->firmware, sc->id); if (rv != 0) { printf("%s: Error %sbling %s\n", __func__, enable == true ? 
"ena" : "disa", clknode_get_name(clk)); return (EINVAL); } return (0); } static clknode_method_t zynqmp_clk_gate_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_gate_init), CLKNODEMETHOD(clknode_set_gate, zynqmp_clk_set_gate), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_gate_clknode, zynqmp_clk_gate_clknode_class, zynqmp_clk_gate_clknode_methods, sizeof(struct zynqmp_clk_gate_softc), clknode_class); int zynqmp_clk_gate_register(struct clkdom *clkdom, device_t fw, struct clknode_init_def *clkdef) { struct clknode *clk; struct zynqmp_clk_gate_softc *sc; uint32_t fw_clk_id; fw_clk_id = clkdef->id - 1; clkdef->id = 0; clk = clknode_create(clkdom, &zynqmp_clk_gate_clknode_class, clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->id = fw_clk_id; sc->firmware = fw; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/xilinx/zynqmp_clk_mux.c b/sys/dev/clk/xilinx/zynqmp_clk_mux.c index 6826ee5123fe..49034edeff96 100644 --- a/sys/dev/clk/xilinx/zynqmp_clk_mux.c +++ b/sys/dev/clk/xilinx/zynqmp_clk_mux.c @@ -1,89 +1,89 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" struct zynqmp_clk_mux_softc { device_t firmware; uint32_t id; }; static int zynqmp_clk_mux_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int zynqmp_clk_mux_set_mux(struct clknode *clk, int idx) { printf("%s: called for %s\n", __func__, clknode_get_name(clk)); return (0); } static clknode_method_t zynqmp_clk_mux_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_mux_init), CLKNODEMETHOD(clknode_set_mux, zynqmp_clk_mux_set_mux), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_mux_clknode, zynqmp_clk_mux_clknode_class, zynqmp_clk_mux_clknode_methods, sizeof(struct zynqmp_clk_mux_softc), clknode_class); int zynqmp_clk_mux_register(struct clkdom *clkdom, device_t fw, struct clknode_init_def *clkdef) { struct clknode *clk; struct zynqmp_clk_mux_softc *sc; uint32_t fw_clk_id; fw_clk_id = clkdef->id - 1; clkdef->id = 0; clk = clknode_create(clkdom, &zynqmp_clk_mux_clknode_class, clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->id = fw_clk_id; sc->firmware = fw; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/xilinx/zynqmp_clk_pll.c b/sys/dev/clk/xilinx/zynqmp_clk_pll.c index 6e5da9533859..9a8157994971 100644 --- a/sys/dev/clk/xilinx/zynqmp_clk_pll.c +++ b/sys/dev/clk/xilinx/zynqmp_clk_pll.c @@ -1,132 +1,132 @@ /*- * 
SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include -#include +#include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" struct zynqmp_clk_pll_softc { device_t firmware; uint32_t id; }; enum pll_mode { PLL_MODE_INT = 0, PLL_MODE_FRAC, PLL_MODE_ERROR, }; static int zynqmp_clk_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int zynqmp_clk_pll_recalc(struct clknode *clk, uint64_t *freq) { struct zynqmp_clk_pll_softc *sc; uint64_t pll_freq, pll_frac; uint32_t div, mode, frac; int rv; sc = clknode_get_softc(clk); rv = ZYNQMP_FIRMWARE_CLOCK_GETDIVIDER(sc->firmware, sc->id, &div); if (rv != 0) { printf("%s: Error while getting divider for %s\n", __func__, clknode_get_name(clk)); } rv = ZYNQMP_FIRMWARE_PLL_GET_MODE(sc->firmware, sc->id, &mode); if (rv != 0) { printf("%s: Error while getting mode for %s\n", __func__, clknode_get_name(clk)); } if (mode == PLL_MODE_ERROR) return (0); pll_freq = *freq * div; if (mode == PLL_MODE_FRAC) { ZYNQMP_FIRMWARE_PLL_GET_FRAC_DATA(sc->firmware, sc->id, &frac); pll_frac = (*freq * frac) / (1 << 16); pll_freq += pll_frac; } *freq = pll_freq; return (0); } static int zynqmp_clk_pll_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout, int flags, int *stop) { /* TODO probably at one point */ return (ENOTSUP); } static clknode_method_t zynqmp_clk_pll_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_pll_init), CLKNODEMETHOD(clknode_recalc_freq, zynqmp_clk_pll_recalc), CLKNODEMETHOD(clknode_set_freq, zynqmp_clk_pll_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_pll_clknode, zynqmp_clk_pll_clknode_class, zynqmp_clk_pll_clknode_methods, sizeof(struct zynqmp_clk_pll_softc), clknode_class); int zynqmp_clk_pll_register(struct clkdom *clkdom, device_t fw, struct clknode_init_def *clkdef) { struct clknode *clk; struct zynqmp_clk_pll_softc *sc; uint32_t fw_clk_id; fw_clk_id = clkdef->id - 1; clkdef->id = 0; clk = clknode_create(clkdom, 
&zynqmp_clk_pll_clknode_class, clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->id = fw_clk_id; sc->firmware = fw; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/clk/xilinx/zynqmp_clock.c b/sys/dev/clk/xilinx/zynqmp_clock.c index 76f23cec38d0..6d7eb7fa2b7e 100644 --- a/sys/dev/clk/xilinx/zynqmp_clock.c +++ b/sys/dev/clk/xilinx/zynqmp_clock.c @@ -1,562 +1,562 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2023 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include #include #include #include #include "clkdev_if.h" #include "zynqmp_firmware_if.h" #define ZYNQMP_MAX_NAME_LEN 16 #define ZYNQMP_MAX_NODES 6 #define ZYNQMP_MAX_PARENTS 100 #define ZYNQMP_CLK_IS_VALID (1 << 0) #define ZYNQMP_CLK_IS_EXT (1 << 2) #define ZYNQMP_GET_NODE_TYPE(x) (x & 0x7) #define ZYNQMP_GET_NODE_CLKFLAGS(x) ((x >> 8) & 0xFF) #define ZYNQMP_GET_NODE_TYPEFLAGS(x) ((x >> 24) & 0xF) enum ZYNQMP_NODE_TYPE { CLK_NODE_TYPE_NULL = 0, CLK_NODE_TYPE_MUX, CLK_NODE_TYPE_PLL, CLK_NODE_TYPE_FIXED, CLK_NODE_TYPE_DIV0, CLK_NODE_TYPE_DIV1, CLK_NODE_TYPE_GATE, }; /* * Clock IDs in the firmware starts at 0 but * exported clocks (and so clock exposed by the clock framework) * starts at 1 */ #define ZYNQMP_ID_TO_CLK(x) ((x) + 1) #define CLK_ID_TO_ZYNQMP(x) ((x) - 1) struct zynqmp_clk { TAILQ_ENTRY(zynqmp_clk) next; struct clknode_init_def clkdef; uint32_t id; uint32_t parentids[ZYNQMP_MAX_PARENTS]; uint32_t topology[ZYNQMP_MAX_NODES]; uint32_t attributes; }; struct zynqmp_clock_softc { device_t dev; device_t parent; phandle_t node; clk_t clk_pss_ref; clk_t clk_video; clk_t clk_pss_alt_ref; clk_t clk_aux_ref; clk_t clk_gt_crx_ref; struct clkdom *clkdom; }; struct name_resp { char name[16]; }; struct zynqmp_clk_softc { struct zynqmp_clk *clk; device_t firmware; uint32_t id; }; static int zynqmp_clk_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static clknode_method_t zynqmp_clk_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, zynqmp_clk_init), CLKNODEMETHOD_END }; DEFINE_CLASS_1(zynqmp_clk_clknode, zynqmp_clk_clknode_class, zynqmp_clk_clknode_methods, sizeof(struct zynqmp_clk_softc), clknode_class); static int zynqmp_clk_register(struct clkdom *clkdom, device_t fw, struct zynqmp_clk *clkdef) { struct clknode *clknode; struct 
zynqmp_clk_softc *sc; char *prev_clock_name = NULL; char *clkname, *parent_name; struct clknode_init_def *zynqclk; int i; for (i = 0; i < ZYNQMP_MAX_NODES; i++) { /* Bail early if we have no node */ if (ZYNQMP_GET_NODE_TYPE(clkdef->topology[i]) == CLK_NODE_TYPE_NULL) break; zynqclk = malloc(sizeof(*zynqclk), M_DEVBUF, M_WAITOK | M_ZERO); zynqclk->id = clkdef->clkdef.id; /* For the first node in the topology we use the main clock parents */ if (i == 0) { zynqclk->parent_cnt = clkdef->clkdef.parent_cnt; zynqclk->parent_names = clkdef->clkdef.parent_names; } else { zynqclk->parent_cnt = 1; zynqclk->parent_names = malloc(sizeof(char *) * zynqclk->parent_cnt, M_DEVBUF, M_ZERO | M_WAITOK); parent_name = strdup(prev_clock_name, M_DEVBUF); zynqclk->parent_names[0] = (const char *)parent_name; } /* Register the clock node based on the topology type */ switch (ZYNQMP_GET_NODE_TYPE(clkdef->topology[i])) { case CLK_NODE_TYPE_MUX: asprintf(&clkname, M_DEVBUF, "%s_mux", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_mux_register(clkdom, fw, zynqclk); break; case CLK_NODE_TYPE_PLL: asprintf(&clkname, M_DEVBUF, "%s_pll", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_pll_register(clkdom, fw, zynqclk); break; case CLK_NODE_TYPE_FIXED: asprintf(&clkname, M_DEVBUF, "%s_fixed", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_fixed_register(clkdom, fw, zynqclk); break; case CLK_NODE_TYPE_DIV0: asprintf(&clkname, M_DEVBUF, "%s_div0", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_div_register(clkdom, fw, zynqclk, CLK_DIV_TYPE_DIV0); break; case CLK_NODE_TYPE_DIV1: asprintf(&clkname, M_DEVBUF, "%s_div1", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_div_register(clkdom, fw, zynqclk, CLK_DIV_TYPE_DIV1); break; case CLK_NODE_TYPE_GATE: asprintf(&clkname, M_DEVBUF, "%s_gate", clkdef->clkdef.name); zynqclk->name = (const char *)clkname; zynqmp_clk_gate_register(clkdom, 
fw, zynqclk); break; case CLK_NODE_TYPE_NULL: default: clkname = NULL; break; } if (i != 0) { free(parent_name, M_DEVBUF); free(zynqclk->parent_names, M_DEVBUF); } if (clkname != NULL) prev_clock_name = strdup(clkname, M_DEVBUF); free(clkname, M_DEVBUF); free(zynqclk, M_DEVBUF); } /* Register main clock */ clkdef->clkdef.name = clkdef->clkdef.name; clkdef->clkdef.parent_cnt = 1; clkdef->clkdef.parent_names = malloc(sizeof(char *) * clkdef->clkdef.parent_cnt, M_DEVBUF, M_ZERO | M_WAITOK); clkdef->clkdef.parent_names[0] = strdup(prev_clock_name, M_DEVBUF); clknode = clknode_create(clkdom, &zynqmp_clk_clknode_class, &clkdef->clkdef); if (clknode == NULL) return (1); sc = clknode_get_softc(clknode); sc->id = clkdef->clkdef.id - 1; sc->firmware = fw; sc->clk = clkdef; clknode_register(clkdom, clknode); return (0); } static int zynqmp_fw_clk_get_name(struct zynqmp_clock_softc *sc, struct zynqmp_clk *clk, uint32_t id) { char *clkname; uint32_t query_data[4]; int rv; rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_NAME, id, 0, 0, query_data); if (rv != 0) return (rv); if (query_data[0] == '\0') return (EINVAL); clkname = malloc(ZYNQMP_MAX_NAME_LEN, M_DEVBUF, M_ZERO | M_WAITOK); memcpy(clkname, query_data, ZYNQMP_MAX_NAME_LEN); clk->clkdef.name = clkname; return (0); } static int zynqmp_fw_clk_get_attributes(struct zynqmp_clock_softc *sc, struct zynqmp_clk *clk, uint32_t id) { uint32_t query_data[4]; int rv; rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_ATTRIBUTES, id, 0, 0, query_data); if (rv != 0) return (rv); clk->attributes = query_data[1]; return (0); } static int zynqmp_fw_clk_get_parents(struct zynqmp_clock_softc *sc, struct zynqmp_clk *clk, uint32_t id) { int rv, i; uint32_t query_data[4]; for (i = 0; i < ZYNQMP_MAX_PARENTS; i += 3) { clk->parentids[i] = -1; clk->parentids[i + 1] = -1; clk->parentids[i + 2] = -1; rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_PARENTS, id, i, 0, query_data); clk->parentids[i] = query_data[1] 
& 0xFFFF; clk->parentids[i + 1] = query_data[2] & 0xFFFF; clk->parentids[i + 2] = query_data[3] & 0xFFFF; if ((int32_t)query_data[1] == -1) { clk->parentids[i] = -1; break; } clk->parentids[i] += 1; clk->clkdef.parent_cnt++; if ((int32_t)query_data[2] == -1) { clk->parentids[i + 1] = -1; break; } clk->parentids[i + 1] += 1; clk->clkdef.parent_cnt++; if ((int32_t)query_data[3] == -1) { clk->parentids[i + 2] = -1; break; } clk->parentids[i + 2] += 1; clk->clkdef.parent_cnt++; if ((int32_t)query_data[1] == -2) clk->parentids[i] = -2; if ((int32_t)query_data[2] == -2) clk->parentids[i + 1] = -2; if ((int32_t)query_data[3] == -2) clk->parentids[i + 2] = -2; if (rv != 0) break; } return (0); } static int zynqmp_fw_clk_get_topology(struct zynqmp_clock_softc *sc, struct zynqmp_clk *clk, uint32_t id) { uint32_t query_data[4]; int rv; rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_TOPOLOGY, id, 0, 0, query_data); if (rv != 0) return (rv); clk->topology[0] = query_data[1]; clk->topology[1] = query_data[2]; clk->topology[2] = query_data[3]; if (query_data[3] == '\0') goto out; rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_TOPOLOGY, id, 3, 0, query_data); if (rv != 0) return (rv); clk->topology[3] = query_data[1]; clk->topology[4] = query_data[2]; clk->topology[5] = query_data[3]; out: return (0); } static int zynqmp_clock_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk) { if (ncells != 1) return (ERANGE); *clk = clknode_find_by_id(clkdom, ZYNQMP_ID_TO_CLK(cells[0])); if (*clk == NULL) return (ENXIO); return (0); } static int zynqmp_fw_clk_get_all(struct zynqmp_clock_softc *sc) { TAILQ_HEAD(tailhead, zynqmp_clk) clk_list; struct zynqmp_clk *clk, *tmp, *tmp2; char *clkname; int rv, i; uint32_t query_data[4], num_clock; TAILQ_INIT(&clk_list); rv = ZYNQMP_FIRMWARE_QUERY_DATA(sc->parent, PM_QID_CLOCK_GET_NUM_CLOCKS, 0, 0, 0, query_data); if (rv != 0) { device_printf(sc->dev, "Cannot get clock details from the 
firmware\n"); return (ENXIO); } num_clock = query_data[1]; for (i = 0; i < num_clock; i++) { clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK | M_ZERO); clk->clkdef.id = ZYNQMP_ID_TO_CLK(i); zynqmp_fw_clk_get_name(sc, clk, i); zynqmp_fw_clk_get_attributes(sc, clk, i); if ((clk->attributes & ZYNQMP_CLK_IS_VALID) == 0) { free(clk, M_DEVBUF); continue; } if (clk->attributes & ZYNQMP_CLK_IS_EXT) goto skip_ext; /* Get parents id */ rv = zynqmp_fw_clk_get_parents(sc, clk, i); if (rv != 0) { device_printf(sc->dev, "Cannot get parent for %s\n", clk->clkdef.name); free(clk, M_DEVBUF); continue; } /* Get topology */ rv = zynqmp_fw_clk_get_topology(sc, clk, i); if (rv != 0) { device_printf(sc->dev, "Cannot get topology for %s\n", clk->clkdef.name); free(clk, M_DEVBUF); continue; } skip_ext: TAILQ_INSERT_TAIL(&clk_list, clk, next); } /* Add a dummy clock */ clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK | M_ZERO); clkname = strdup("dummy", M_DEVBUF); clk->clkdef.name = (const char *)clkname; clk->clkdef.id = i; clk->attributes = ZYNQMP_CLK_IS_EXT; TAILQ_INSERT_TAIL(&clk_list, clk, next); /* Map parents id to name */ TAILQ_FOREACH_SAFE(clk, &clk_list, next, tmp) { if (clk->attributes & ZYNQMP_CLK_IS_EXT) continue; clk->clkdef.parent_names = malloc(sizeof(char *) * clk->clkdef.parent_cnt, M_DEVBUF, M_ZERO | M_WAITOK); for (i = 0; i < ZYNQMP_MAX_PARENTS; i++) { if (clk->parentids[i] == -1) break; if (clk->parentids[i] == -2) { clk->clkdef.parent_names[i] = strdup("dummy", M_DEVBUF); continue; } TAILQ_FOREACH(tmp2, &clk_list, next) { if (tmp2->clkdef.id == clk->parentids[i]) { if (tmp2->attributes & ZYNQMP_CLK_IS_EXT) { int idx; if (ofw_bus_find_string_index( sc->node, "clock-names", tmp2->clkdef.name, &idx) == ENOENT) clk->clkdef.parent_names[i] = strdup("dummy", M_DEVBUF); else clk->clkdef.parent_names[i] = strdup(tmp2->clkdef.name, M_DEVBUF); } else clk->clkdef.parent_names[i] = strdup(tmp2->clkdef.name, M_DEVBUF); break; } } } } sc->clkdom = clkdom_create(sc->dev); if (sc->clkdom 
== NULL) panic("Cannot create clkdom\n"); clkdom_set_ofw_mapper(sc->clkdom, zynqmp_clock_ofw_map); /* Register the clocks */ TAILQ_FOREACH_SAFE(clk, &clk_list, next, tmp) { if (clk->attributes & ZYNQMP_CLK_IS_EXT) { if (strcmp(clk->clkdef.name, "dummy") == 0) { struct clk_fixed_def dummy; bzero(&dummy, sizeof(dummy)); dummy.clkdef.id = clk->clkdef.id; dummy.clkdef.name = strdup("dummy", M_DEVBUF); clknode_fixed_register(sc->clkdom, &dummy); free(__DECONST(char *, dummy.clkdef.name), M_DEVBUF); } } else zynqmp_clk_register(sc->clkdom, sc->parent, clk); TAILQ_REMOVE(&clk_list, clk, next); for (i = 0; i < clk->clkdef.parent_cnt; i++) free(__DECONST(char *, clk->clkdef.parent_names[i]), M_DEVBUF); free(clk->clkdef.parent_names, M_DEVBUF); free(__DECONST(char *, clk->clkdef.name), M_DEVBUF); free(clk, M_DEVBUF); } if (clkdom_finit(sc->clkdom) != 0) panic("cannot finalize clkdom initialization\n"); if (bootverbose) clkdom_dump(sc->clkdom); return (0); } static int zynqmp_clock_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "xlnx,zynqmp-clk")) return (ENXIO); device_set_desc(dev, "ZynqMP Clock Controller"); return (BUS_PROBE_DEFAULT); } static int zynqmp_clock_attach(device_t dev) { struct zynqmp_clock_softc *sc; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->parent = device_get_parent(dev); sc->node = ofw_bus_get_node(dev); /* Enable all clocks */ if (clk_get_by_ofw_name(dev, 0, "pss_ref_clk", &sc->clk_pss_ref) != 0) { device_printf(dev, "Cannot get pss_ref_clk clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_pss_ref); if (rv != 0) { device_printf(dev, "Could not enable clock pss_ref_clk\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "video_clk", &sc->clk_video) != 0) { device_printf(dev, "Cannot get video_clk clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_video); if (rv != 0) { device_printf(dev, "Could not enable clock video_clk\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, 
"pss_alt_ref_clk", &sc->clk_pss_alt_ref) != 0) { device_printf(dev, "Cannot get pss_alt_ref_clk clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_pss_alt_ref); if (rv != 0) { device_printf(dev, "Could not enable clock pss_alt_ref_clk\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "aux_ref_clk", &sc->clk_aux_ref) != 0) { device_printf(dev, "Cannot get pss_aux_clk clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_aux_ref); if (rv != 0) { device_printf(dev, "Could not enable clock pss_aux_clk\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "gt_crx_ref_clk", &sc->clk_gt_crx_ref) != 0) { device_printf(dev, "Cannot get gt_crx_ref_clk clock\n"); return (ENXIO); } rv = clk_enable(sc->clk_gt_crx_ref); if (rv != 0) { device_printf(dev, "Could not enable clock gt_crx_ref_clk\n"); return (ENXIO); } rv = zynqmp_fw_clk_get_all(sc); if (rv != 0) { clk_disable(sc->clk_gt_crx_ref); clk_disable(sc->clk_aux_ref); clk_disable(sc->clk_pss_alt_ref); clk_disable(sc->clk_video); clk_disable(sc->clk_pss_ref); return (rv); } return (0); } static device_method_t zynqmp_clock_methods[] = { /* device_if */ DEVMETHOD(device_probe, zynqmp_clock_probe), DEVMETHOD(device_attach, zynqmp_clock_attach), DEVMETHOD_END }; static driver_t zynqmp_clock_driver = { "zynqmp_clock", zynqmp_clock_methods, sizeof(struct zynqmp_clock_softc), }; EARLY_DRIVER_MODULE(zynqmp_clock, simplebus, zynqmp_clock_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_LAST); diff --git a/sys/dev/cpufreq/cpufreq_dt.c b/sys/dev/cpufreq/cpufreq_dt.c index cfd052c56a96..4dae7142b380 100644 --- a/sys/dev/cpufreq/cpufreq_dt.c +++ b/sys/dev/cpufreq/cpufreq_dt.c @@ -1,627 +1,627 @@ /*- * Copyright (c) 2018 Emmanuel Vadot * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Generic DT based cpufreq driver */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "cpufreq_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, "cpufreq_dt: " msg); #else #define DPRINTF(dev, msg...) 
#endif enum opp_version { OPP_V1 = 1, OPP_V2, }; struct cpufreq_dt_opp { uint64_t freq; uint32_t uvolt_target; uint32_t uvolt_min; uint32_t uvolt_max; uint32_t uamps; uint32_t clk_latency; bool turbo_mode; bool opp_suspend; }; #define CPUFREQ_DT_HAVE_REGULATOR(sc) ((sc)->reg != NULL) struct cpufreq_dt_softc { device_t dev; clk_t clk; regulator_t reg; struct cpufreq_dt_opp *opp; ssize_t nopp; int cpu; cpuset_t cpus; }; static void cpufreq_dt_notify(device_t dev, uint64_t freq) { struct cpufreq_dt_softc *sc; struct pcpu *pc; int cpu; sc = device_get_softc(dev); CPU_FOREACH(cpu) { if (CPU_ISSET(cpu, &sc->cpus)) { pc = pcpu_find(cpu); pc->pc_clock = freq; } } } static const struct cpufreq_dt_opp * cpufreq_dt_find_opp(device_t dev, uint64_t freq) { struct cpufreq_dt_softc *sc; uint64_t diff, best_diff; ssize_t n, best_n; sc = device_get_softc(dev); diff = 0; best_diff = ~0; DPRINTF(dev, "Looking for freq %ju\n", freq); for (n = 0; n < sc->nopp; n++) { diff = abs64((int64_t)sc->opp[n].freq - (int64_t)freq); DPRINTF(dev, "Testing %ju, diff is %ju\n", sc->opp[n].freq, diff); if (diff < best_diff) { best_diff = diff; best_n = n; DPRINTF(dev, "%ju is best for now\n", sc->opp[n].freq); } } DPRINTF(dev, "Will use %ju\n", sc->opp[best_n].freq); return (&sc->opp[best_n]); } static void cpufreq_dt_opp_to_setting(device_t dev, const struct cpufreq_dt_opp *opp, struct cf_setting *set) { memset(set, 0, sizeof(*set)); set->freq = opp->freq / 1000000; set->volts = opp->uvolt_target / 1000; set->power = CPUFREQ_VAL_UNKNOWN; set->lat = opp->clk_latency; set->dev = dev; } static int cpufreq_dt_get(device_t dev, struct cf_setting *set) { struct cpufreq_dt_softc *sc; const struct cpufreq_dt_opp *opp; uint64_t freq; sc = device_get_softc(dev); DPRINTF(dev, "cpufreq_dt_get\n"); if (clk_get_freq(sc->clk, &freq) != 0) return (ENXIO); opp = cpufreq_dt_find_opp(dev, freq); if (opp == NULL) { device_printf(dev, "Can't find the current freq in opp\n"); return (ENOENT); } 
cpufreq_dt_opp_to_setting(dev, opp, set); DPRINTF(dev, "Current freq %dMhz\n", set->freq); return (0); } static int cpufreq_dt_set(device_t dev, const struct cf_setting *set) { struct cpufreq_dt_softc *sc; const struct cpufreq_dt_opp *opp, *copp; uint64_t freq; int uvolt, error; sc = device_get_softc(dev); DPRINTF(dev, "Working on cpu %d\n", sc->cpu); DPRINTF(dev, "We have %d cpu on this dev\n", CPU_COUNT(&sc->cpus)); if (!CPU_ISSET(sc->cpu, &sc->cpus)) { DPRINTF(dev, "Not for this CPU\n"); return (0); } if (clk_get_freq(sc->clk, &freq) != 0) { device_printf(dev, "Can't get current clk freq\n"); return (ENXIO); } /* * Only do the regulator work if it's required. */ if (CPUFREQ_DT_HAVE_REGULATOR(sc)) { /* Try to get current valtage by using regulator first. */ error = regulator_get_voltage(sc->reg, &uvolt); if (error != 0) { /* * Try oppoints table as backup way. However, * this is insufficient because the actual processor * frequency may not be in the table. PLL frequency * granularity can be different that granularity of * oppoint table. 
*/ copp = cpufreq_dt_find_opp(sc->dev, freq); if (copp == NULL) { device_printf(dev, "Can't find the current freq in opp\n"); return (ENOENT); } uvolt = copp->uvolt_target; } } else uvolt = 0; opp = cpufreq_dt_find_opp(sc->dev, set->freq * 1000000); if (opp == NULL) { device_printf(dev, "Couldn't find an opp for this freq\n"); return (EINVAL); } DPRINTF(sc->dev, "Current freq %ju, uvolt: %d\n", freq, uvolt); DPRINTF(sc->dev, "Target freq %ju, , uvolt: %d\n", opp->freq, opp->uvolt_target); if (CPUFREQ_DT_HAVE_REGULATOR(sc) && (uvolt < opp->uvolt_target)) { DPRINTF(dev, "Changing regulator from %u to %u\n", uvolt, opp->uvolt_target); error = regulator_set_voltage(sc->reg, opp->uvolt_min, opp->uvolt_max); if (error != 0) { DPRINTF(dev, "Failed, backout\n"); return (ENXIO); } } DPRINTF(dev, "Setting clk to %ju\n", opp->freq); error = clk_set_freq(sc->clk, opp->freq, CLK_SET_ROUND_DOWN); if (error != 0) { DPRINTF(dev, "Failed, backout\n"); /* Restore previous voltage (best effort) */ if (CPUFREQ_DT_HAVE_REGULATOR(sc)) error = regulator_set_voltage(sc->reg, copp->uvolt_min, copp->uvolt_max); return (ENXIO); } if (CPUFREQ_DT_HAVE_REGULATOR(sc) && (uvolt > opp->uvolt_target)) { DPRINTF(dev, "Changing regulator from %u to %u\n", uvolt, opp->uvolt_target); error = regulator_set_voltage(sc->reg, opp->uvolt_min, opp->uvolt_max); if (error != 0) { DPRINTF(dev, "Failed to switch regulator to %d\n", opp->uvolt_target); /* Restore previous CPU frequency (best effort) */ (void)clk_set_freq(sc->clk, copp->freq, 0); return (ENXIO); } } if (clk_get_freq(sc->clk, &freq) == 0) cpufreq_dt_notify(dev, freq); return (0); } static int cpufreq_dt_type(device_t dev, int *type) { if (type == NULL) return (EINVAL); *type = CPUFREQ_TYPE_ABSOLUTE; return (0); } static int cpufreq_dt_settings(device_t dev, struct cf_setting *sets, int *count) { struct cpufreq_dt_softc *sc; ssize_t n; DPRINTF(dev, "cpufreq_dt_settings\n"); if (sets == NULL || count == NULL) return (EINVAL); sc = 
device_get_softc(dev); if (*count < sc->nopp) { *count = (int)sc->nopp; return (E2BIG); } for (n = 0; n < sc->nopp; n++) cpufreq_dt_opp_to_setting(dev, &sc->opp[n], &sets[n]); *count = (int)sc->nopp; return (0); } static void cpufreq_dt_identify(driver_t *driver, device_t parent) { phandle_t node; /* Properties must be listed under node /cpus/cpu@0 */ node = ofw_bus_get_node(parent); /* The cpu@0 node must have the following properties */ if (!OF_hasprop(node, "clocks")) return; if (!OF_hasprop(node, "operating-points") && !OF_hasprop(node, "operating-points-v2")) return; if (device_find_child(parent, "cpufreq_dt", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "cpufreq_dt", device_get_unit(parent)) == NULL) device_printf(parent, "add cpufreq_dt child failed\n"); } static int cpufreq_dt_probe(device_t dev) { phandle_t node; node = ofw_bus_get_node(device_get_parent(dev)); /* * Note - supply isn't required here for probe; we'll check * it out in more detail during attach. */ if (!OF_hasprop(node, "clocks")) return (ENXIO); if (!OF_hasprop(node, "operating-points") && !OF_hasprop(node, "operating-points-v2")) return (ENXIO); device_set_desc(dev, "Generic cpufreq driver"); return (BUS_PROBE_GENERIC); } static int cpufreq_dt_oppv1_parse(struct cpufreq_dt_softc *sc, phandle_t node) { uint32_t *opp, lat; ssize_t n; sc->nopp = OF_getencprop_alloc_multi(node, "operating-points", sizeof(uint32_t) * 2, (void **)&opp); if (sc->nopp == -1) return (ENXIO); if (OF_getencprop(node, "clock-latency", &lat, sizeof(lat)) == -1) lat = CPUFREQ_VAL_UNKNOWN; sc->opp = malloc(sizeof(*sc->opp) * sc->nopp, M_DEVBUF, M_WAITOK); for (n = 0; n < sc->nopp; n++) { sc->opp[n].freq = opp[n * 2 + 0] * 1000; sc->opp[n].uvolt_min = opp[n * 2 + 1]; sc->opp[n].uvolt_max = sc->opp[n].uvolt_min; sc->opp[n].uvolt_target = sc->opp[n].uvolt_min; sc->opp[n].clk_latency = lat; if (bootverbose) device_printf(sc->dev, "%ju.%03ju MHz, %u uV\n", sc->opp[n].freq / 1000000, sc->opp[n].freq % 1000000, 
sc->opp[n].uvolt_target); } free(opp, M_OFWPROP); return (0); } static int cpufreq_dt_oppv2_parse(struct cpufreq_dt_softc *sc, phandle_t node) { phandle_t opp, opp_table, opp_xref; pcell_t cell[2]; uint32_t *volts, lat; int nvolt, i; /* * operating-points-v2 does not require the voltage entries * and a regulator. So, it's OK if they're not there. */ if (OF_getencprop(node, "operating-points-v2", &opp_xref, sizeof(opp_xref)) == -1) { device_printf(sc->dev, "Cannot get xref to oppv2 table\n"); return (ENXIO); } opp_table = OF_node_from_xref(opp_xref); if (opp_table == opp_xref) return (ENXIO); if (!OF_hasprop(opp_table, "opp-shared")) { device_printf(sc->dev, "Only opp-shared is supported\n"); return (ENXIO); } for (opp = OF_child(opp_table); opp > 0; opp = OF_peer(opp)) sc->nopp += 1; sc->opp = malloc(sizeof(*sc->opp) * sc->nopp, M_DEVBUF, M_WAITOK); for (i = 0, opp_table = OF_child(opp_table); opp_table > 0; opp_table = OF_peer(opp_table), i++) { /* opp-hz is a required property */ if (OF_getencprop(opp_table, "opp-hz", cell, sizeof(cell)) == -1) continue; sc->opp[i].freq = cell[0]; sc->opp[i].freq <<= 32; sc->opp[i].freq |= cell[1]; if (OF_getencprop(opp_table, "clock-latency", &lat, sizeof(lat)) == -1) sc->opp[i].clk_latency = CPUFREQ_VAL_UNKNOWN; else sc->opp[i].clk_latency = (int)lat; if (OF_hasprop(opp_table, "turbo-mode")) sc->opp[i].turbo_mode = true; if (OF_hasprop(opp_table, "opp-suspend")) sc->opp[i].opp_suspend = true; if (CPUFREQ_DT_HAVE_REGULATOR(sc)) { nvolt = OF_getencprop_alloc_multi(opp_table, "opp-microvolt", sizeof(*volts), (void **)&volts); if (nvolt == 1) { sc->opp[i].uvolt_target = volts[0]; sc->opp[i].uvolt_min = volts[0]; sc->opp[i].uvolt_max = volts[0]; } else if (nvolt == 3) { sc->opp[i].uvolt_target = volts[0]; sc->opp[i].uvolt_min = volts[1]; sc->opp[i].uvolt_max = volts[2]; } else { device_printf(sc->dev, "Wrong count of opp-microvolt property\n"); OF_prop_free(volts); free(sc->opp, M_DEVBUF); return (ENXIO); } OF_prop_free(volts); } 
else { /* No regulator required; don't add anything */ sc->opp[i].uvolt_target = 0; sc->opp[i].uvolt_min = 0; sc->opp[i].uvolt_max = 0; } if (bootverbose) device_printf(sc->dev, "%ju.%03ju Mhz (%u uV)\n", sc->opp[i].freq / 1000000, sc->opp[i].freq % 1000000, sc->opp[i].uvolt_target); } return (0); } static int cpufreq_dt_attach(device_t dev) { struct cpufreq_dt_softc *sc; phandle_t node; phandle_t cnode, opp, copp; int cpu; uint64_t freq; int rv = 0; char device_type[16]; enum opp_version version; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(device_get_parent(dev)); sc->cpu = device_get_unit(device_get_parent(dev)); sc->reg = NULL; DPRINTF(dev, "cpu=%d\n", sc->cpu); if (sc->cpu >= mp_ncpus) { device_printf(dev, "Not attaching as cpu is not present\n"); rv = ENXIO; goto error; } /* * Cache if we have the regulator supply but don't error out * quite yet. If it's operating-points-v2 then regulator * and voltage entries are optional. */ if (regulator_get_by_ofw_property(dev, node, "cpu-supply", &sc->reg) == 0) device_printf(dev, "Found cpu-supply\n"); else if (regulator_get_by_ofw_property(dev, node, "cpu0-supply", &sc->reg) == 0) device_printf(dev, "Found cpu0-supply\n"); /* * Determine which operating mode we're in. Error out if we expect * a regulator but we're not getting it. */ if (OF_hasprop(node, "operating-points")) version = OPP_V1; else if (OF_hasprop(node, "operating-points-v2")) version = OPP_V2; else { device_printf(dev, "didn't find a valid operating-points or v2 node\n"); rv = ENXIO; goto error; } /* * Now, we only enforce needing a regulator for v1. 
*/ if ((version == OPP_V1) && !CPUFREQ_DT_HAVE_REGULATOR(sc)) { device_printf(dev, "no regulator for %s\n", ofw_bus_get_name(device_get_parent(dev))); rv = ENXIO; goto error; } if (clk_get_by_ofw_index(dev, node, 0, &sc->clk) != 0) { device_printf(dev, "no clock for %s\n", ofw_bus_get_name(device_get_parent(dev))); rv = ENXIO; goto error; } if (version == OPP_V1) { rv = cpufreq_dt_oppv1_parse(sc, node); if (rv != 0) { device_printf(dev, "Failed to parse opp-v1 table\n"); goto error; } OF_getencprop(node, "operating-points", &opp, sizeof(opp)); } else if (version == OPP_V2) { rv = cpufreq_dt_oppv2_parse(sc, node); if (rv != 0) { device_printf(dev, "Failed to parse opp-v2 table\n"); goto error; } OF_getencprop(node, "operating-points-v2", &opp, sizeof(opp)); } else { device_printf(dev, "operating points version is incorrect\n"); goto error; } /* * Find all CPUs that share the same opp table */ CPU_ZERO(&sc->cpus); cnode = OF_parent(node); for (cpu = 0, cnode = OF_child(cnode); cnode > 0; cnode = OF_peer(cnode)) { if (OF_getprop(cnode, "device_type", device_type, sizeof(device_type)) <= 0) continue; if (strcmp(device_type, "cpu") != 0) continue; if (cpu == sc->cpu) { DPRINTF(dev, "Skipping our cpu\n"); CPU_SET(cpu, &sc->cpus); cpu++; continue; } DPRINTF(dev, "Testing CPU %d\n", cpu); copp = -1; if (version == OPP_V1) OF_getencprop(cnode, "operating-points", &copp, sizeof(copp)); else if (version == OPP_V2) OF_getencprop(cnode, "operating-points-v2", &copp, sizeof(copp)); if (opp == copp) { DPRINTF(dev, "CPU %d is using the same opp as this one (%d)\n", cpu, sc->cpu); CPU_SET(cpu, &sc->cpus); } cpu++; } if (clk_get_freq(sc->clk, &freq) == 0) cpufreq_dt_notify(dev, freq); cpufreq_register(dev); return (0); error: if (CPUFREQ_DT_HAVE_REGULATOR(sc)) regulator_release(sc->reg); return (rv); } static device_method_t cpufreq_dt_methods[] = { /* Device interface */ DEVMETHOD(device_identify, cpufreq_dt_identify), DEVMETHOD(device_probe, cpufreq_dt_probe), 
DEVMETHOD(device_attach, cpufreq_dt_attach), /* cpufreq interface */ DEVMETHOD(cpufreq_drv_get, cpufreq_dt_get), DEVMETHOD(cpufreq_drv_set, cpufreq_dt_set), DEVMETHOD(cpufreq_drv_type, cpufreq_dt_type), DEVMETHOD(cpufreq_drv_settings, cpufreq_dt_settings), DEVMETHOD_END }; static driver_t cpufreq_dt_driver = { "cpufreq_dt", cpufreq_dt_methods, sizeof(struct cpufreq_dt_softc), }; DRIVER_MODULE(cpufreq_dt, cpu, cpufreq_dt_driver, 0, 0); MODULE_VERSION(cpufreq_dt, 1); diff --git a/sys/dev/dwc/dwc1000_core.c b/sys/dev/dwc/dwc1000_core.c index 83d54d8325e1..d25c31e66e28 100644 --- a/sys/dev/dwc/dwc1000_core.c +++ b/sys/dev/dwc/dwc1000_core.c @@ -1,447 +1,447 @@ /*- * Copyright (c) 2014 Ruslan Bukin * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Ethernet media access controller (EMAC) * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) * * EMAC is an instance of the Synopsys DesignWare 3504-0 * Universal 10/100/1000 Ethernet MAC (DWC_gmac). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include "if_dwc_if.h" struct dwc_hash_maddr_ctx { struct dwc_softc *sc; uint32_t hash[8]; }; #define STATS_HARVEST_INTERVAL 2 /* Pause time field in the transmitted control frame */ static int dwc_pause_time = 0xffff; TUNABLE_INT("hw.dwc.pause_time", &dwc_pause_time); /* * MIIBUS functions */ int dwc1000_miibus_read_reg(device_t dev, int phy, int reg) { struct dwc_softc *sc; uint16_t mii; size_t cnt; int rv = 0; sc = device_get_softc(dev); mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) | GMII_ADDRESS_GB; /* Busy flag */ WRITE4(sc, GMII_ADDRESS, mii); for (cnt = 0; cnt < 1000; cnt++) { if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { rv = READ4(sc, GMII_DATA); break; } DELAY(10); } return rv; } int dwc1000_miibus_write_reg(device_t dev, int phy, int reg, int val) { struct dwc_softc *sc; uint16_t mii; size_t cnt; sc = device_get_softc(dev); mii = ((phy & 
GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) | GMII_ADDRESS_GB | GMII_ADDRESS_GW; WRITE4(sc, GMII_DATA, val); WRITE4(sc, GMII_ADDRESS, mii); for (cnt = 0; cnt < 1000; cnt++) { if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { break; } DELAY(10); } return (0); } void dwc1000_miibus_statchg(device_t dev) { struct dwc_softc *sc; struct mii_data *mii; uint32_t reg; /* * Called by the MII bus driver when the PHY establishes * link to set the MAC interface registers. */ sc = device_get_softc(dev); DWC_ASSERT_LOCKED(sc); mii = sc->mii_softc; if (mii->mii_media_status & IFM_ACTIVE) sc->link_is_up = true; else sc->link_is_up = false; reg = READ4(sc, MAC_CONFIGURATION); switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: reg &= ~(CONF_FES | CONF_PS); break; case IFM_100_TX: reg |= (CONF_FES | CONF_PS); break; case IFM_10_T: reg &= ~(CONF_FES); reg |= (CONF_PS); break; case IFM_NONE: sc->link_is_up = false; return; default: sc->link_is_up = false; device_printf(dev, "Unsupported media %u\n", IFM_SUBTYPE(mii->mii_media_active)); return; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) reg |= (CONF_DM); else reg &= ~(CONF_DM); WRITE4(sc, MAC_CONFIGURATION, reg); reg = FLOW_CONTROL_UP; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) reg |= FLOW_CONTROL_TX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) reg |= FLOW_CONTROL_RX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) reg |= dwc_pause_time << FLOW_CONTROL_PT_SHIFT; WRITE4(sc, FLOW_CONTROL, reg); IF_DWC_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active)); } void dwc1000_core_setup(struct dwc_softc *sc) { uint32_t reg; DWC_ASSERT_LOCKED(sc); /* Enable core */ reg = READ4(sc, MAC_CONFIGURATION); reg |= (CONF_JD | CONF_ACS | CONF_BE); WRITE4(sc, MAC_CONFIGURATION, reg); } void dwc1000_enable_mac(struct dwc_softc *sc, bool enable) { uint32_t reg; 
DWC_ASSERT_LOCKED(sc); reg = READ4(sc, MAC_CONFIGURATION); if (enable) reg |= CONF_TE | CONF_RE; else reg &= ~(CONF_TE | CONF_RE); WRITE4(sc, MAC_CONFIGURATION, reg); } void dwc1000_enable_csum_offload(struct dwc_softc *sc) { uint32_t reg; DWC_ASSERT_LOCKED(sc); reg = READ4(sc, MAC_CONFIGURATION); if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) reg |= CONF_IPC; else reg &= ~CONF_IPC; WRITE4(sc, MAC_CONFIGURATION, reg); } static const uint8_t nibbletab[] = { /* 0x0 0000 -> 0000 */ 0x0, /* 0x1 0001 -> 1000 */ 0x8, /* 0x2 0010 -> 0100 */ 0x4, /* 0x3 0011 -> 1100 */ 0xc, /* 0x4 0100 -> 0010 */ 0x2, /* 0x5 0101 -> 1010 */ 0xa, /* 0x6 0110 -> 0110 */ 0x6, /* 0x7 0111 -> 1110 */ 0xe, /* 0x8 1000 -> 0001 */ 0x1, /* 0x9 1001 -> 1001 */ 0x9, /* 0xa 1010 -> 0101 */ 0x5, /* 0xb 1011 -> 1101 */ 0xd, /* 0xc 1100 -> 0011 */ 0x3, /* 0xd 1101 -> 1011 */ 0xb, /* 0xe 1110 -> 0111 */ 0x7, /* 0xf 1111 -> 1111 */ 0xf, }; static uint8_t bitreverse(uint8_t x) { return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4]; } static u_int dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct dwc_hash_maddr_ctx *ctx = arg; uint32_t crc, hashbit, hashreg; uint8_t val; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); /* Take lower 8 bits and reverse it */ val = bitreverse(~crc & 0xff); /* * TODO: There is probably a HW_FEATURES bit which isn't * related to the extended descriptors that describe this */ if (!ctx->sc->dma_ext_desc) val >>= 2; /* Only need lower 6 bits */ hashreg = (val >> 5); hashbit = (val & 31); ctx->hash[hashreg] |= (1 << hashbit); return (1); } void dwc1000_setup_rxfilter(struct dwc_softc *sc) { struct dwc_hash_maddr_ctx ctx; if_t ifp; uint8_t *eaddr; uint32_t ffval, hi, lo; int nhash, i; DWC_ASSERT_LOCKED(sc); ifp = sc->ifp; /* * TODO: There is probably a HW_FEATURES bit which isn't * related to the extended descriptors that describe this */ nhash = sc->dma_ext_desc == false ? 2 : 8; /* * Set the multicast (group) filter hash. 
*/ if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { ffval = (FRAME_FILTER_PM); for (i = 0; i < nhash; i++) ctx.hash[i] = ~0; } else { ffval = (FRAME_FILTER_HMC); for (i = 0; i < nhash; i++) ctx.hash[i] = 0; ctx.sc = sc; if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx); } /* * Set the individual address filter hash. */ if ((if_getflags(ifp) & IFF_PROMISC) != 0) ffval |= (FRAME_FILTER_PR); /* * Set the primary address. */ eaddr = if_getlladdr(ifp); lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | (eaddr[3] << 24); hi = eaddr[4] | (eaddr[5] << 8); WRITE4(sc, MAC_ADDRESS_LOW(0), lo); WRITE4(sc, MAC_ADDRESS_HIGH(0), hi); WRITE4(sc, MAC_FRAME_FILTER, ffval); if (!sc->dma_ext_desc) { WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]); WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]); } else { for (i = 0; i < nhash; i++) WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]); } } void dwc1000_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr) { uint32_t hi, lo, rnd; /* * Try to recover a MAC address from the running hardware. If there's * something non-zero there, assume the bootloader did the right thing * and just use it. * * Otherwise, set the address to a convenient locally assigned address, * 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally * assigned bit set, and the broadcast/multicast bit clear. 
*/ lo = READ4(sc, MAC_ADDRESS_LOW(0)); hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff; if ((lo != 0xffffffff) || (hi != 0xffff)) { hwaddr[0] = (lo >> 0) & 0xff; hwaddr[1] = (lo >> 8) & 0xff; hwaddr[2] = (lo >> 16) & 0xff; hwaddr[3] = (lo >> 24) & 0xff; hwaddr[4] = (hi >> 0) & 0xff; hwaddr[5] = (hi >> 8) & 0xff; } else { rnd = arc4random() & 0x00ffffff; hwaddr[0] = 'b'; hwaddr[1] = 's'; hwaddr[2] = 'd'; hwaddr[3] = rnd >> 16; hwaddr[4] = rnd >> 8; hwaddr[5] = rnd >> 0; } } /* * Stats */ static void dwc1000_clear_stats(struct dwc_softc *sc) { uint32_t reg; reg = READ4(sc, MMC_CONTROL); reg |= (MMC_CONTROL_CNTRST); WRITE4(sc, MMC_CONTROL, reg); } void dwc1000_harvest_stats(struct dwc_softc *sc) { if_t ifp; /* We don't need to harvest too often. */ if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL) return; sc->stats_harvest_count = 0; ifp = sc->ifp; if_inc_counter(ifp, IFCOUNTER_IERRORS, READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) + READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) + READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) + READ4(sc, RXLENGTHERROR)); if_inc_counter(ifp, IFCOUNTER_OERRORS, READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) + READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR)); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL)); dwc1000_clear_stats(sc); } void dwc1000_intr(struct dwc_softc *sc) { uint32_t reg; DWC_ASSERT_LOCKED(sc); reg = READ4(sc, INTERRUPT_STATUS); if (reg) READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS); } void dwc1000_intr_disable(struct dwc_softc *sc) { WRITE4(sc, INTERRUPT_ENABLE, 0); } diff --git a/sys/dev/dwc/dwc1000_dma.c b/sys/dev/dwc/dwc1000_dma.c index 8f3340eb3128..e89ccee5b0ff 100644 --- a/sys/dev/dwc/dwc1000_dma.c +++ b/sys/dev/dwc/dwc1000_dma.c @@ -1,889 +1,889 @@ /*- * Copyright (c) 2014 Ruslan Bukin * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as 
part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #define WATCHDOG_TIMEOUT_SECS 5 #define DMA_RESET_TIMEOUT 100 /* TX descriptors - TDESC0 is almost unified */ #define TDESC0_OWN (1U << 31) #define TDESC0_IHE (1U << 16) /* IP Header Error */ #define TDESC0_ES (1U << 15) /* Error Summary */ #define TDESC0_JT (1U << 14) /* Jabber Timeout */ #define TDESC0_FF (1U << 13) /* Frame Flushed */ #define TDESC0_PCE (1U << 12) /* Payload Checksum Error */ #define TDESC0_LOC (1U << 11) /* Loss of Carrier */ #define TDESC0_NC (1U << 10) /* No Carrier */ #define TDESC0_LC (1U << 9) /* Late Collision */ #define TDESC0_EC (1U << 8) /* Excessive Collision */ #define TDESC0_VF (1U << 7) /* VLAN Frame */ #define TDESC0_CC_MASK 0xf #define TDESC0_CC_SHIFT 3 /* Collision Count */ #define TDESC0_ED (1U << 2) /* Excessive Deferral */ #define TDESC0_UF (1U << 1) /* Underflow Error */ #define TDESC0_DB (1U << 0) /* Deferred Bit */ /* TX descriptors - TDESC0 extended format only */ #define ETDESC0_IC (1U << 30) /* Interrupt on Completion */ #define ETDESC0_LS (1U << 29) /* Last Segment */ #define ETDESC0_FS (1U << 28) /* First Segment */ #define ETDESC0_DC (1U << 27) /* Disable CRC */ #define ETDESC0_DP (1U << 26) /* Disable Padding */ #define ETDESC0_CIC_NONE (0U << 22) /* Checksum Insertion Control */ #define ETDESC0_CIC_HDR (1U << 22) #define ETDESC0_CIC_SEG (2U << 22) #define ETDESC0_CIC_FULL (3U << 22) #define ETDESC0_TER (1U << 21) /* Transmit End of Ring */ #define ETDESC0_TCH (1U << 20) /* Second Address Chained */ /* TX descriptors - TDESC1 normal format */ #define NTDESC1_IC (1U << 31) /* Interrupt on Completion */ #define NTDESC1_LS (1U << 30) /* Last Segment */ #define NTDESC1_FS (1U << 29) /* First Segment */ #define NTDESC1_CIC_NONE (0U << 27) /* Checksum Insertion Control */ 
#define NTDESC1_CIC_HDR (1U << 27) #define NTDESC1_CIC_SEG (2U << 27) #define NTDESC1_CIC_FULL (3U << 27) #define NTDESC1_DC (1U << 26) /* Disable CRC */ #define NTDESC1_TER (1U << 25) /* Transmit End of Ring */ #define NTDESC1_TCH (1U << 24) /* Second Address Chained */ /* TX descriptors - TDESC1 extended format */ #define ETDESC1_DP (1U << 23) /* Disable Padding */ #define ETDESC1_TBS2_MASK 0x7ff #define ETDESC1_TBS2_SHIFT 11 /* Receive Buffer 2 Size */ #define ETDESC1_TBS1_MASK 0x7ff #define ETDESC1_TBS1_SHIFT 0 /* Receive Buffer 1 Size */ /* RX descriptor - RDESC0 is unified */ #define RDESC0_OWN (1U << 31) #define RDESC0_AFM (1U << 30) /* Dest. Address Filter Fail */ #define RDESC0_FL_MASK 0x3fff #define RDESC0_FL_SHIFT 16 /* Frame Length */ #define RDESC0_ES (1U << 15) /* Error Summary */ #define RDESC0_DE (1U << 14) /* Descriptor Error */ #define RDESC0_SAF (1U << 13) /* Source Address Filter Fail */ #define RDESC0_LE (1U << 12) /* Length Error */ #define RDESC0_OE (1U << 11) /* Overflow Error */ #define RDESC0_VLAN (1U << 10) /* VLAN Tag */ #define RDESC0_FS (1U << 9) /* First Descriptor */ #define RDESC0_LS (1U << 8) /* Last Descriptor */ #define RDESC0_ICE (1U << 7) /* IPC Checksum Error */ #define RDESC0_LC (1U << 6) /* Late Collision */ #define RDESC0_FT (1U << 5) /* Frame Type */ #define RDESC0_RWT (1U << 4) /* Receive Watchdog Timeout */ #define RDESC0_RE (1U << 3) /* Receive Error */ #define RDESC0_DBE (1U << 2) /* Dribble Bit Error */ #define RDESC0_CE (1U << 1) /* CRC Error */ #define RDESC0_PCE (1U << 0) /* Payload Checksum Error */ #define RDESC0_RXMA (1U << 0) /* Rx MAC Address */ /* RX descriptors - RDESC1 normal format */ #define NRDESC1_DIC (1U << 31) /* Disable Intr on Completion */ #define NRDESC1_RER (1U << 25) /* Receive End of Ring */ #define NRDESC1_RCH (1U << 24) /* Second Address Chained */ #define NRDESC1_RBS2_MASK 0x7ff #define NRDESC1_RBS2_SHIFT 11 /* Receive Buffer 2 Size */ #define NRDESC1_RBS1_MASK 0x7ff #define 
NRDESC1_RBS1_SHIFT 0 /* Receive Buffer 1 Size */ /* RX descriptors - RDESC1 enhanced format */ #define ERDESC1_DIC (1U << 31) /* Disable Intr on Completion */ #define ERDESC1_RBS2_MASK 0x7ffff #define ERDESC1_RBS2_SHIFT 16 /* Receive Buffer 2 Size */ #define ERDESC1_RER (1U << 15) /* Receive End of Ring */ #define ERDESC1_RCH (1U << 14) /* Second Address Chained */ #define ERDESC1_RBS1_MASK 0x7ffff #define ERDESC1_RBS1_SHIFT 0 /* Receive Buffer 1 Size */ /* * The hardware imposes alignment restrictions on various objects involved in * DMA transfers. These values are expressed in bytes (not bits). */ #define DWC_DESC_RING_ALIGN 2048 static inline uint32_t next_txidx(struct dwc_softc *sc, uint32_t curidx) { return ((curidx + 1) % TX_DESC_COUNT); } static inline uint32_t next_rxidx(struct dwc_softc *sc, uint32_t curidx) { return ((curidx + 1) % RX_DESC_COUNT); } static void dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } inline static void txdesc_clear(struct dwc_softc *sc, int idx) { sc->tx_desccount--; sc->txdesc_ring[idx].addr1 = (uint32_t)(0); sc->txdesc_ring[idx].desc0 = 0; sc->txdesc_ring[idx].desc1 = 0; } inline static void txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr, uint32_t len, uint32_t flags, bool first, bool last) { uint32_t desc0, desc1; if (!sc->dma_ext_desc) { desc0 = 0; desc1 = NTDESC1_TCH | len | flags; if (first) desc1 |= NTDESC1_FS; if (last) desc1 |= NTDESC1_LS | NTDESC1_IC; } else { desc0 = ETDESC0_TCH | flags; if (first) desc0 |= ETDESC0_FS; if (last) desc0 |= ETDESC0_LS | ETDESC0_IC; desc1 = len; } ++sc->tx_desccount; sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr); sc->txdesc_ring[idx].desc0 = desc0; sc->txdesc_ring[idx].desc1 = desc1; wmb(); sc->txdesc_ring[idx].desc0 |= TDESC0_OWN; wmb(); } inline static uint32_t rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr) { uint32_t nidx; sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr; 
nidx = next_rxidx(sc, idx); sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr + (nidx * sizeof(struct dwc_hwdesc)); if (!sc->dma_ext_desc) sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH | MIN(MCLBYTES, NRDESC1_RBS1_MASK); else sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH | MIN(MCLBYTES, ERDESC1_RBS1_MASK); wmb(); sc->rxdesc_ring[idx].desc0 = RDESC0_OWN; wmb(); return (nidx); } int dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp) { struct bus_dma_segment segs[TX_MAP_MAX_SEGS]; int error, nsegs; struct mbuf * m; uint32_t flags = 0; int i; int last; error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0); if (error == EFBIG) { /* * The map may be partially mapped from the first call. * Make sure to reset it. */ bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map); if ((m = m_defrag(*mp, M_NOWAIT)) == NULL) return (ENOMEM); *mp = m; error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0); } if (error != 0) return (ENOMEM); if (sc->tx_desccount + nsegs > TX_DESC_COUNT) { bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map); return (ENOMEM); } m = *mp; if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) { if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) { if (!sc->dma_ext_desc) flags = NTDESC1_CIC_FULL; else flags = ETDESC0_CIC_FULL; } else { if (!sc->dma_ext_desc) flags = NTDESC1_CIC_HDR; else flags = ETDESC0_CIC_HDR; } } bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map, BUS_DMASYNC_PREWRITE); sc->txbuf_map[idx].mbuf = m; for (i = 0; i < nsegs; i++) { txdesc_setup(sc, sc->tx_desc_head, segs[i].ds_addr, segs[i].ds_len, (i == 0) ? 
flags : 0, /* only first desc needs flags */ (i == 0), (i == nsegs - 1)); last = sc->tx_desc_head; sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head); } sc->txbuf_map[idx].last_desc_idx = last; return (0); } static int dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m) { struct bus_dma_segment seg; int error, nsegs; m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map, m, &seg, &nsegs, 0); if (error != 0) return (error); KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, BUS_DMASYNC_PREREAD); sc->rxbuf_map[idx].mbuf = m; rxdesc_setup(sc, idx, seg.ds_addr); return (0); } static struct mbuf * dwc_alloc_mbufcl(struct dwc_softc *sc) { struct mbuf *m; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m != NULL) m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; return (m); } static struct mbuf * dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc, struct dwc_bufmap *map) { if_t ifp; struct mbuf *m, *m0; int len; uint32_t rdesc0; m = map->mbuf; ifp = sc->ifp; rdesc0 = desc ->desc0; if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) != (RDESC0_FS | RDESC0_LS)) { /* * Something very wrong happens. The whole packet should be * recevied in one descriptr. Report problem. 
*/
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid, recycle the old mbuf.
		 * Probably an impossible case.
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	/* Only trust the hardware checksum bits when RDESC0_FT is set. */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	/* Drop the softc lock around the stack input call. */
	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}

/*
 * Reclaim completed TX buffers: for each map whose descriptors have all
 * been released by the hardware (TDESC0_OWN clear), unload and free the
 * mbuf, clear its descriptors, and un-stall the interface.
 */
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}

/*
 * Dequeue packets from the interface send queue onto the TX ring until
 * packets, descriptors, or maps run out, then hit the poll-demand
 * register if anything was queued and arm the watchdog.
 */
void
dma1000_txstart(struct dwc_softc *sc)
{
	int enqueued;
	struct mbuf *m;

	enqueued = 0;

	for (;;) {
		/* Keep room for a worst-case (TX_MAP_MAX_SEGS) packet. */
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			/* Mapping failed: requeue the packet and stall. */
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

/*
 * Pass completed RX descriptors up the stack, re-arming every slot with
 * a fresh (or, on failure, the recycled) buffer so the ring never has a
 * hole.
 */
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);

	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			/* Recycle: hand the old buffer back to the DMA. */
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create hole in RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed: error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

/*
 * Start the DMA controller
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);
WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Stop the DMA controller
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Software-reset the DMA engine and poll for the self-clearing SWR bit.
 * Returns 0 on success or ENXIO if the bit does not clear within
 * DMA_RESET_TIMEOUT polls.
 */
int
dma1000_reset(struct dwc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= DMA_RESET_TIMEOUT) {
		return (ENXIO);
	}

	return (0);
}

/*
 * Create the bus_dma resources
 */
int
dma1000_init(struct dwc_softc *sc)
{
	struct mbuf *m;
	uint32_t reg;
	int error;
	int nidx;
	int idx;

	/* Program bus-mode burst parameters (set up from FDT properties). */
	reg = BUS_MODE_USP;
	if (!sc->nopblx8)
		reg |= BUS_MODE_EIGHTXPBL;
	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
	if (sc->fixed_burst)
		reg |= BUS_MODE_FIXEDBURST;
	if (sc->mixed_burst)
		reg |= BUS_MODE_MIXEDBURST;
	if (sc->aal)
		reg |= BUS_MODE_AAL;

	WRITE4(sc, BUS_MODE, reg);

	reg = READ4(sc, HW_FEATURE);
	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
		sc->dma_ext_desc = true;

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag.
*/ DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TX_DESC_SIZE, 1, /* maxsize, nsegments */ TX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->txdesc_tag); if (error != 0) { device_printf(sc->dev, "could not create TX ring DMA tag.\n"); goto out; } error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->txdesc_map); if (error != 0) { device_printf(sc->dev, "could not allocate TX descriptor ring.\n"); goto out; } error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr, &sc->txdesc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load TX descriptor ring map.\n"); goto out; } for (idx = 0; idx < TX_DESC_COUNT; idx++) { nidx = next_txidx(sc, idx); sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr + (nidx * sizeof(struct dwc_hwdesc)); } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES*TX_MAP_MAX_SEGS, /* maxsize */ TX_MAP_MAX_SEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->txbuf_tag); if (error != 0) { device_printf(sc->dev, "could not create TX ring DMA tag.\n"); goto out; } for (idx = 0; idx < TX_MAP_COUNT; idx++) { error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT, &sc->txbuf_map[idx].map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); goto out; } } for (idx = 0; idx < TX_DESC_COUNT; idx++) txdesc_clear(sc, idx); WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr); /* * Set up RX descriptor ring, descriptors, dma maps, and mbufs. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RX_DESC_SIZE, 1, /* maxsize, nsegments */ RX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rxdesc_tag); if (error != 0) { device_printf(sc->dev, "could not create RX ring DMA tag.\n"); goto out; } error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rxdesc_map); if (error != 0) { device_printf(sc->dev, "could not allocate RX descriptor ring.\n"); goto out; } error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr, &sc->rxdesc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load RX descriptor ring map.\n"); goto out; } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rxbuf_tag); if (error != 0) { device_printf(sc->dev, "could not create RX buf DMA tag.\n"); goto out; } for (idx = 0; idx < RX_DESC_COUNT; idx++) { error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT, &sc->rxbuf_map[idx].map); if (error != 0) { device_printf(sc->dev, "could not create RX buffer DMA map.\n"); goto out; } if ((m = dwc_alloc_mbufcl(sc)) == NULL) { device_printf(sc->dev, "Could not alloc mbuf\n"); error = ENOMEM; goto out; } if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) { device_printf(sc->dev, "could not create new RX buffer.\n"); goto out; } } WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr); out: if (error != 0) return (ENXIO); return (0); } /* * Free the bus_dma resources */ void dma1000_free(struct dwc_softc *sc) { bus_dmamap_t map; int idx; /* Clean up RX DMA 
resources and free mbufs. */
	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		if ((map = sc->rxbuf_map[idx].map) != NULL) {
			bus_dmamap_unload(sc->rxbuf_tag, map);
			bus_dmamap_destroy(sc->rxbuf_tag, map);
			m_freem(sc->rxbuf_map[idx].mbuf);
		}
	}
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);

	/* Clean up TX DMA resources. */
	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		if ((map = sc->txbuf_map[idx].map) != NULL) {
			/* TX maps are already unloaded. */
			bus_dmamap_destroy(sc->txbuf_tag, map);
		}
	}
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
}

/*
 * Interrupt function
 */

/*
 * DMA interrupt handler: dispatch RX/TX completion work on the normal
 * interrupt summary, and return EIO on a fatal bus error so the caller
 * can restart the controller.  The observed status bits are written
 * back to DMA_STATUS; NOTE(review): presumably write-to-clear — confirm
 * against the GMAC databook.
 */
int
dma1000_intr(struct dwc_softc *sc)
{
	uint32_t reg;
	int rv;

	DWC_ASSERT_LOCKED(sc);

	rv = 0;
	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dma1000_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dma1000_txfinish_locked(sc);
			dma1000_txstart(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			rv = EIO;
		}
	}

	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
	return (rv);
}
diff --git a/sys/dev/dwc/if_dwc.c b/sys/dev/dwc/if_dwc.c
index ab43ad6f8645..be44a6be193b 100644
--- a/sys/dev/dwc/if_dwc.c
+++ b/sys/dev/dwc/if_dwc.c
@@ -1,702 +1,702 @@
/*-
 * Copyright (c) 2014 Ruslan Bukin
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Ethernet media access controller (EMAC) * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) * * EMAC is an instance of the Synopsys DesignWare 3504-0 * Universal 10/100/1000 Ethernet MAC (DWC_gmac). 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "if_dwc_if.h" #include "gpio_if.h" #include "miibus_if.h" static struct resource_spec dwc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static void dwc_stop_locked(struct dwc_softc *sc); static void dwc_tick(void *arg); /* * Media functions */ static void dwc_media_status(if_t ifp, struct ifmediareq *ifmr) { struct dwc_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = sc->mii_softc; DWC_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; DWC_UNLOCK(sc); } static int dwc_media_change_locked(struct dwc_softc *sc) { return (mii_mediachg(sc->mii_softc)); } static int dwc_media_change(if_t ifp) { struct dwc_softc *sc; int error; sc = if_getsoftc(ifp); DWC_LOCK(sc); error = dwc_media_change_locked(sc); DWC_UNLOCK(sc); return (error); } /* * if_ functions */ static void dwc_txstart_locked(struct dwc_softc *sc) { if_t ifp; DWC_ASSERT_LOCKED(sc); if (!sc->link_is_up) return; ifp = sc->ifp; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; dma1000_txstart(sc); } static void dwc_txstart(if_t ifp) { struct dwc_softc *sc = if_getsoftc(ifp); DWC_LOCK(sc); dwc_txstart_locked(sc); DWC_UNLOCK(sc); } static void dwc_init_locked(struct dwc_softc *sc) { if_t ifp = sc->ifp; DWC_ASSERT_LOCKED(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; /* * Call mii_mediachg() which will call back into dwc1000_miibus_statchg() * to set up the remaining config registers based on current media. 
*/ mii_mediachg(sc->mii_softc); dwc1000_setup_rxfilter(sc); dwc1000_core_setup(sc); dwc1000_enable_mac(sc, true); dwc1000_enable_csum_offload(sc); dma1000_start(sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); callout_reset(&sc->dwc_callout, hz, dwc_tick, sc); } static void dwc_init(void *if_softc) { struct dwc_softc *sc = if_softc; DWC_LOCK(sc); dwc_init_locked(sc); DWC_UNLOCK(sc); } static void dwc_stop_locked(struct dwc_softc *sc) { if_t ifp; DWC_ASSERT_LOCKED(sc); ifp = sc->ifp; if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->tx_watchdog_count = 0; sc->stats_harvest_count = 0; callout_stop(&sc->dwc_callout); dma1000_stop(sc); dwc1000_enable_mac(sc, false); } static int dwc_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct dwc_softc *sc; struct mii_data *mii; struct ifreq *ifr; int flags, mask, error; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFFLAGS: DWC_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->if_flags; if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) dwc1000_setup_rxfilter(sc); } else { if (!sc->is_detaching) dwc_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) dwc_stop_locked(sc); } sc->if_flags = if_getflags(ifp); DWC_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { DWC_LOCK(sc); dwc1000_setup_rxfilter(sc); DWC_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = sc->mii_softc; error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); if (mask & IFCAP_VLAN_MTU) { /* No work to do except acknowledge the change took */ if_togglecapenable(ifp, IFCAP_VLAN_MTU); } if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 
if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0,
			    CSUM_IP | CSUM_UDP | CSUM_TCP);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			DWC_LOCK(sc);
			dwc1000_enable_csum_offload(sc);
			DWC_UNLOCK(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Interrupts functions
 */

/*
 * Top-level interrupt handler: run the MAC-level handler, then the DMA
 * handler; on a fatal DMA bus error (EIO) restart the controller.
 */
static void
dwc_intr(void *arg)
{
	struct dwc_softc *sc;
	int rv;

	sc = arg;
	DWC_LOCK(sc);
	dwc1000_intr(sc);
	rv = dma1000_intr(sc);
	if (rv == EIO) {
		device_printf(sc->dev,
		    "Ethernet DMA error, restarting controller.\n");
		dwc_stop_locked(sc);
		dwc_init_locked(sc);
	}
	DWC_UNLOCK(sc);
}

/*
 * Once-a-second housekeeping callout: TX watchdog, statistics harvest,
 * and link-state polling.  Runs with the softc mutex held (mtx-backed
 * callout).
 */
static void
dwc_tick(void *arg)
{
	struct dwc_softc *sc;
	if_t ifp;
	int link_was_up;

	sc = arg;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Typical tx watchdog. If this fires it indicates that we enqueued
	 * packets for output and never got a txdone interrupt for them.
	 * Maybe it's a missed interrupt somehow, just pretend we got one.
	 */
	if (sc->tx_watchdog_count > 0) {
		if (--sc->tx_watchdog_count == 0) {
			dma1000_txfinish_locked(sc);
		}
	}

	/* Gather stats from hardware counters. */
	dwc1000_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	/* Restart transmit if the link just came up. */
	if (sc->link_is_up && !link_was_up)
		dwc_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}

/*
 * Pulse the PHY reset GPIO described by the deprecated
 * "snps,reset-gpio" / "snps,reset-delays-us" device-tree properties.
 * Returns 0 when the properties are absent (nothing to do) or on
 * success, ENXIO on malformed properties.
 */
static int
dwc_reset_phy(struct dwc_softc *sc)
{
	pcell_t gpio_prop[4];
	pcell_t delay_prop[3];
	phandle_t gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	/*
	 * All those properties are deprecated but still used in some DTS.
	 * The new way to deal with this is to use the generic bindings
	 * present in the ethernet-phy node.
*/ if (OF_getencprop(sc->node, "snps,reset-gpio", gpio_prop, sizeof(gpio_prop)) <= 0) return (0); if (OF_getencprop(sc->node, "snps,reset-delays-us", delay_prop, sizeof(delay_prop)) <= 0) { device_printf(sc->dev, "Wrong property for snps,reset-delays-us"); return (ENXIO); } gpio_node = OF_node_from_xref(gpio_prop[0]); if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) { device_printf(sc->dev, "Can't find gpio controller for phy reset\n"); return (ENXIO); } if (GPIO_MAP_GPIOS(gpio, sc->node, gpio_node, nitems(gpio_prop) - 1, gpio_prop + 1, &pin, &flags) != 0) { device_printf(sc->dev, "Can't map gpio for phy reset\n"); return (ENXIO); } pin_value = GPIO_PIN_LOW; if (OF_hasprop(sc->node, "snps,reset-active-low")) pin_value = GPIO_PIN_HIGH; GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[0] * 5); GPIO_PIN_SET(gpio, pin, !pin_value); DELAY(delay_prop[1] * 5); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[2] * 5); return (0); } static int dwc_clock_init(struct dwc_softc *sc) { int rv; int64_t freq; /* Required clock */ rv = clk_get_by_ofw_name(sc->dev, 0, "stmmaceth", &sc->clk_stmmaceth); if (rv != 0) { device_printf(sc->dev, "Cannot get GMAC main clock\n"); return (ENXIO); } if ((rv = clk_enable(sc->clk_stmmaceth)) != 0) { device_printf(sc->dev, "could not enable main clock\n"); return (rv); } /* Optional clock */ rv = clk_get_by_ofw_name(sc->dev, 0, "pclk", &sc->clk_pclk); if (rv != 0) return (0); if ((rv = clk_enable(sc->clk_pclk)) != 0) { device_printf(sc->dev, "could not enable peripheral clock\n"); return (rv); } if (bootverbose) { clk_get_freq(sc->clk_stmmaceth, &freq); device_printf(sc->dev, "MAC clock(%s) freq: %jd\n", clk_get_name(sc->clk_stmmaceth), (intmax_t)freq); } return (0); } static int dwc_reset_deassert(struct dwc_softc *sc) { int rv; /* Required reset */ rv = hwreset_get_by_ofw_name(sc->dev, 0, "stmmaceth", &sc->rst_stmmaceth); if (rv != 0) { device_printf(sc->dev, "Cannot get GMAC 
reset\n"); return (ENXIO); } rv = hwreset_deassert(sc->rst_stmmaceth); if (rv != 0) { device_printf(sc->dev, "could not de-assert GMAC reset\n"); return (rv); } /* Optional reset */ rv = hwreset_get_by_ofw_name(sc->dev, 0, "ahb", &sc->rst_ahb); if (rv != 0) return (0); rv = hwreset_deassert(sc->rst_ahb); if (rv != 0) { device_printf(sc->dev, "could not de-assert AHB reset\n"); return (rv); } return (0); } /* * Probe/Attach functions */ static int dwc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "snps,dwmac")) return (ENXIO); device_set_desc(dev, "Gigabit Ethernet Controller"); return (BUS_PROBE_DEFAULT); } static int dwc_attach(device_t dev) { uint8_t macaddr[ETHER_ADDR_LEN]; struct dwc_softc *sc; if_t ifp; int error; uint32_t pbl; sc = device_get_softc(dev); sc->dev = dev; sc->rx_idx = 0; sc->tx_desccount = TX_DESC_COUNT; sc->tx_mapcount = 0; sc->node = ofw_bus_get_node(dev); sc->phy_mode = mii_fdt_get_contype(sc->node); switch (sc->phy_mode) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: case MII_CONTYPE_RMII: case MII_CONTYPE_MII: break; default: device_printf(dev, "Unsupported MII type\n"); return (ENXIO); } if (OF_getencprop(sc->node, "snps,pbl", &pbl, sizeof(uint32_t)) <= 0) pbl = DMA_DEFAULT_PBL; if (OF_getencprop(sc->node, "snps,txpbl", &sc->txpbl, sizeof(uint32_t)) <= 0) sc->txpbl = pbl; if (OF_getencprop(sc->node, "snps,rxpbl", &sc->rxpbl, sizeof(uint32_t)) <= 0) sc->rxpbl = pbl; if (OF_hasprop(sc->node, "snps,no-pbl-x8") == 1) sc->nopblx8 = true; if (OF_hasprop(sc->node, "snps,fixed-burst") == 1) sc->fixed_burst = true; if (OF_hasprop(sc->node, "snps,mixed-burst") == 1) sc->mixed_burst = true; if (OF_hasprop(sc->node, "snps,aal") == 1) sc->aal = true; error = clk_set_assigned(dev, ofw_bus_get_node(dev)); if (error != 0) { device_printf(dev, "clk_set_assigned failed\n"); return (error); } /* Enable main clock */ if ((error = 
dwc_clock_init(sc)) != 0) return (error); /* De-assert main reset */ if ((error = dwc_reset_deassert(sc)) != 0) return (error); if (IF_DWC_INIT(dev) != 0) return (ENXIO); if ((sc->mii_clk = IF_DWC_MII_CLK(dev)) < 0) { device_printf(dev, "Cannot get mii clock value %d\n", -sc->mii_clk); return (ENXIO); } if (bus_alloc_resources(dev, dwc_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Read MAC before reset */ dwc1000_get_hwaddr(sc, macaddr); /* Reset the PHY if needed */ if (dwc_reset_phy(sc) != 0) { device_printf(dev, "Can't reset the PHY\n"); bus_release_resources(dev, dwc_spec, sc->res); return (ENXIO); } /* Reset */ if ((error = dma1000_reset(sc)) != 0) { device_printf(sc->dev, "Can't reset DMA controller.\n"); bus_release_resources(sc->dev, dwc_spec, sc->res); return (error); } if (dma1000_init(sc)) { bus_release_resources(dev, dwc_spec, sc->res); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0); /* Setup interrupt handler. */ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, dwc_intr, sc, &sc->intr_cookie); if (error != 0) { device_printf(dev, "could not setup interrupt handler.\n"); bus_release_resources(dev, dwc_spec, sc->res); return (ENXIO); } /* Set up the ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(ifp, dwc_txstart); if_setioctlfn(ifp, dwc_ioctl); if_setinitfn(ifp, dwc_init); if_setsendqlen(ifp, TX_MAP_COUNT - 1); if_setsendqready(sc->ifp); if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); /* Attach the mii driver. 
*/ error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change, dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "PHY attach failed\n"); bus_teardown_intr(dev, sc->res[1], sc->intr_cookie); bus_release_resources(dev, dwc_spec, sc->res); return (ENXIO); } sc->mii_softc = device_get_softc(sc->miibus); /* All ready to run, attach the ethernet interface. */ ether_ifattach(ifp, macaddr); sc->is_attached = true; return (0); } static int dwc_detach(device_t dev) { struct dwc_softc *sc; sc = device_get_softc(dev); /* * Disable and tear down interrupts before anything else, so we don't * race with the handler. */ dwc1000_intr_disable(sc); if (sc->intr_cookie != NULL) { bus_teardown_intr(dev, sc->res[1], sc->intr_cookie); } if (sc->is_attached) { DWC_LOCK(sc); sc->is_detaching = true; dwc_stop_locked(sc); DWC_UNLOCK(sc); callout_drain(&sc->dwc_callout); ether_ifdetach(sc->ifp); } if (sc->miibus != NULL) { device_delete_child(dev, sc->miibus); sc->miibus = NULL; } bus_generic_detach(dev); /* Free DMA descriptors */ dma1000_free(sc); if (sc->ifp != NULL) { if_free(sc->ifp); sc->ifp = NULL; } bus_release_resources(dev, dwc_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static device_method_t dwc_methods[] = { DEVMETHOD(device_probe, dwc_probe), DEVMETHOD(device_attach, dwc_attach), DEVMETHOD(device_detach, dwc_detach), /* MII Interface */ DEVMETHOD(miibus_readreg, dwc1000_miibus_read_reg), DEVMETHOD(miibus_writereg, dwc1000_miibus_write_reg), DEVMETHOD(miibus_statchg, dwc1000_miibus_statchg), { 0, 0 } }; driver_t dwc_driver = { "dwc", dwc_methods, sizeof(struct dwc_softc), }; DRIVER_MODULE(dwc, simplebus, dwc_driver, 0, 0); DRIVER_MODULE(miibus, dwc, miibus_driver, 0, 0); MODULE_DEPEND(dwc, ether, 1, 1, 1); MODULE_DEPEND(dwc, miibus, 1, 1, 1); diff --git a/sys/dev/dwc/if_dwc_aw.c b/sys/dev/dwc/if_dwc_aw.c index 5a39a08809f8..981f621e3f27 100644 --- a/sys/dev/dwc/if_dwc_aw.c +++ b/sys/dev/dwc/if_dwc_aw.c @@ -1,147 
+1,147 @@ /*- * Copyright (c) 2015 Luiz Otavio O Souza * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "if_dwc_if.h" static int a20_if_dwc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun7i-a20-gmac")) return (ENXIO); device_set_desc(dev, "A20 Gigabit Ethernet Controller"); return (BUS_PROBE_DEFAULT); } static int a20_if_dwc_init(device_t dev) { struct dwc_softc *sc; const char *tx_parent_name; clk_t clk_tx, clk_tx_parent; regulator_t reg; int error; sc = device_get_softc(dev); /* Configure PHY for MII or RGMII mode */ switch(sc->phy_mode) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: tx_parent_name = "gmac_int_tx"; break; case MII_CONTYPE_MII: tx_parent_name = "mii_phy_tx"; break; default: device_printf(dev, "unsupported PHY connection type: %d", sc->phy_mode); return (ENXIO); } error = clk_get_by_ofw_name(dev, 0, "allwinner_gmac_tx", &clk_tx); if (error != 0) { device_printf(dev, "could not get tx clk\n"); return (error); } error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent); if (error != 0) { device_printf(dev, "could not get clock '%s'\n", tx_parent_name); return (error); } error = clk_set_parent_by_clk(clk_tx, clk_tx_parent); if (error != 0) { device_printf(dev, "could not set tx clk parent\n"); return (error); } /* Enable PHY regulator if applicable */ if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) { error = regulator_enable(reg); if (error != 0) { device_printf(dev, "could not enable PHY regulator\n"); return (error); } } return (0); } static int a20_if_dwc_mii_clk(device_t dev) { return (GMAC_MII_CLK_150_250M_DIV102); } static device_method_t a20_dwc_methods[] = { DEVMETHOD(device_probe, a20_if_dwc_probe), DEVMETHOD(if_dwc_init, a20_if_dwc_init), DEVMETHOD(if_dwc_mii_clk, a20_if_dwc_mii_clk), DEVMETHOD_END }; extern 
driver_t dwc_driver; DEFINE_CLASS_1(dwc, a20_dwc_driver, a20_dwc_methods, sizeof(struct dwc_softc), dwc_driver); DRIVER_MODULE(a20_dwc, simplebus, a20_dwc_driver, 0, 0); MODULE_DEPEND(a20_dwc, dwc, 1, 1, 1); diff --git a/sys/dev/dwc/if_dwc_rk.c b/sys/dev/dwc/if_dwc_rk.c index 2a1a5e763c2f..76fd11dfd109 100644 --- a/sys/dev/dwc/if_dwc_rk.c +++ b/sys/dev/dwc/if_dwc_rk.c @@ -1,636 +1,636 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "if_dwc_if.h" #include "syscon_if.h" #define RK3328_GRF_MAC_CON0 0x0900 #define MAC_CON0_GMAC2IO_TX_DL_CFG_MASK 0x7F #define MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT 0 #define MAC_CON0_GMAC2IO_RX_DL_CFG_MASK 0x7F #define MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT 7 #define RK3328_GRF_MAC_CON1 0x0904 #define MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA (1 << 0) #define MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA (1 << 1) #define MAC_CON1_GMAC2IO_GMII_CLK_SEL_MASK (3 << 11) #define MAC_CON1_GMAC2IO_GMII_CLK_SEL_125 (0 << 11) #define MAC_CON1_GMAC2IO_GMII_CLK_SEL_25 (3 << 11) #define MAC_CON1_GMAC2IO_GMII_CLK_SEL_2_5 (2 << 11) #define MAC_CON1_GMAC2IO_RMII_MODE_MASK (1 << 9) #define MAC_CON1_GMAC2IO_RMII_MODE (1 << 9) #define MAC_CON1_GMAC2IO_INTF_SEL_MASK (7 << 4) #define MAC_CON1_GMAC2IO_INTF_RMII (4 << 4) #define MAC_CON1_GMAC2IO_INTF_RGMII (1 << 4) #define MAC_CON1_GMAC2IO_RMII_CLK_SEL_MASK (1 << 7) #define MAC_CON1_GMAC2IO_RMII_CLK_SEL_25 (1 << 7) #define MAC_CON1_GMAC2IO_RMII_CLK_SEL_2_5 (0 << 7) #define MAC_CON1_GMAC2IO_MAC_SPEED_MASK (1 << 2) #define MAC_CON1_GMAC2IO_MAC_SPEED_100 (1 << 2) #define MAC_CON1_GMAC2IO_MAC_SPEED_10 (0 << 2) #define RK3328_GRF_MAC_CON2 0x0908 #define RK3328_GRF_MACPHY_CON0 0x0B00 #define MACPHY_CON0_CLK_50M_MASK (1 << 14) #define MACPHY_CON0_CLK_50M (1 << 14) #define MACPHY_CON0_RMII_MODE_MASK (3 << 6) #define MACPHY_CON0_RMII_MODE (1 << 6) #define RK3328_GRF_MACPHY_CON1 0x0B04 #define MACPHY_CON1_RMII_MODE_MASK (1 << 9) #define MACPHY_CON1_RMII_MODE (1 << 9) #define RK3328_GRF_MACPHY_CON2 0x0B08 #define RK3328_GRF_MACPHY_CON3 0x0B0C #define RK3328_GRF_MACPHY_STATUS 0x0B10 #define RK3399_GRF_SOC_CON5 0xc214 #define SOC_CON5_GMAC_CLK_SEL_MASK (3 << 4) #define SOC_CON5_GMAC_CLK_SEL_125 (0 << 4) #define SOC_CON5_GMAC_CLK_SEL_25 (3 << 4) #define SOC_CON5_GMAC_CLK_SEL_2_5 (2 << 4) #define 
RK3399_GRF_SOC_CON6 0xc218 #define SOC_CON6_GMAC_TXCLK_DLY_ENA (1 << 7) #define SOC_CON6_TX_DL_CFG_MASK 0x7F #define SOC_CON6_TX_DL_CFG_SHIFT 0 #define SOC_CON6_RX_DL_CFG_MASK 0x7F #define SOC_CON6_GMAC_RXCLK_DLY_ENA (1 << 15) #define SOC_CON6_RX_DL_CFG_SHIFT 8 struct if_dwc_rk_softc; typedef void (*if_dwc_rk_set_delaysfn_t)(struct if_dwc_rk_softc *); typedef int (*if_dwc_rk_set_speedfn_t)(struct if_dwc_rk_softc *, int); typedef void (*if_dwc_rk_set_phy_modefn_t)(struct if_dwc_rk_softc *); typedef void (*if_dwc_rk_phy_powerupfn_t)(struct if_dwc_rk_softc *); struct if_dwc_rk_ops { if_dwc_rk_set_delaysfn_t set_delays; if_dwc_rk_set_speedfn_t set_speed; if_dwc_rk_set_phy_modefn_t set_phy_mode; if_dwc_rk_phy_powerupfn_t phy_powerup; }; struct if_dwc_rk_softc { struct dwc_softc base; uint32_t tx_delay; uint32_t rx_delay; bool integrated_phy; bool clock_in; phandle_t phy_node; struct syscon *grf; struct if_dwc_rk_ops *ops; /* Common clocks */ clk_t mac_clk_rx; clk_t mac_clk_tx; clk_t aclk_mac; clk_t pclk_mac; clk_t clk_stmmaceth; clk_t clk_mac_speed; /* RMII clocks */ clk_t clk_mac_ref; clk_t clk_mac_refout; /* PHY clock */ clk_t clk_phy; }; static void rk3328_set_delays(struct if_dwc_rk_softc *sc); static int rk3328_set_speed(struct if_dwc_rk_softc *sc, int speed); static void rk3328_set_phy_mode(struct if_dwc_rk_softc *sc); static void rk3328_phy_powerup(struct if_dwc_rk_softc *sc); static void rk3399_set_delays(struct if_dwc_rk_softc *sc); static int rk3399_set_speed(struct if_dwc_rk_softc *sc, int speed); static struct if_dwc_rk_ops rk3288_ops = { }; static struct if_dwc_rk_ops rk3328_ops = { .set_delays = rk3328_set_delays, .set_speed = rk3328_set_speed, .set_phy_mode = rk3328_set_phy_mode, .phy_powerup = rk3328_phy_powerup, }; static struct if_dwc_rk_ops rk3399_ops = { .set_delays = rk3399_set_delays, .set_speed = rk3399_set_speed, }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-gmac", (uintptr_t)&rk3288_ops}, {"rockchip,rk3328-gmac", 
(uintptr_t)&rk3328_ops}, {"rockchip,rk3399-gmac", (uintptr_t)&rk3399_ops}, {NULL, 0} }; static void rk3328_set_delays(struct if_dwc_rk_softc *sc) { uint32_t reg; uint32_t tx, rx; if (!mii_contype_is_rgmii(sc->base.phy_mode)) return; reg = SYSCON_READ_4(sc->grf, RK3328_GRF_MAC_CON0); tx = ((reg >> MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT) & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK); rx = ((reg >> MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT) & MAC_CON0_GMAC2IO_RX_DL_CFG_MASK); reg = SYSCON_READ_4(sc->grf, RK3328_GRF_MAC_CON1); if (bootverbose) { device_printf(sc->base.dev, "current delays settings: tx=%u(%s) rx=%u(%s)\n", tx, ((reg & MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA) ? "enabled" : "disabled"), rx, ((reg & MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA) ? "enabled" : "disabled")); device_printf(sc->base.dev, "setting new RK3328 RX/TX delays: %d/%d\n", sc->tx_delay, sc->rx_delay); } reg = (MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA | MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA) << 16; reg |= (MAC_CON1_GMAC2IO_GMAC_TXCLK_DLY_ENA | MAC_CON1_GMAC2IO_GMAC_RXCLK_DLY_ENA); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1, reg); reg = 0xffff << 16; reg |= ((sc->tx_delay & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK) << MAC_CON0_GMAC2IO_TX_DL_CFG_SHIFT); reg |= ((sc->rx_delay & MAC_CON0_GMAC2IO_TX_DL_CFG_MASK) << MAC_CON0_GMAC2IO_RX_DL_CFG_SHIFT); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON0, reg); } static int rk3328_set_speed(struct if_dwc_rk_softc *sc, int speed) { uint32_t reg; switch (sc->base.phy_mode) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: switch (speed) { case IFM_1000_T: case IFM_1000_SX: reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_125; break; case IFM_100_TX: reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_25; break; case IFM_10_T: reg = MAC_CON1_GMAC2IO_GMII_CLK_SEL_2_5; break; default: device_printf(sc->base.dev, "unsupported RGMII media %u\n", speed); return (-1); } SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1, ((MAC_CON1_GMAC2IO_GMII_CLK_SEL_MASK << 16) | reg)); break; case 
MII_CONTYPE_RMII: switch (speed) { case IFM_100_TX: reg = MAC_CON1_GMAC2IO_RMII_CLK_SEL_25 | MAC_CON1_GMAC2IO_MAC_SPEED_100; break; case IFM_10_T: reg = MAC_CON1_GMAC2IO_RMII_CLK_SEL_2_5 | MAC_CON1_GMAC2IO_MAC_SPEED_10; break; default: device_printf(sc->base.dev, "unsupported RMII media %u\n", speed); return (-1); } SYSCON_WRITE_4(sc->grf, sc->integrated_phy ? RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1, reg | ((MAC_CON1_GMAC2IO_RMII_CLK_SEL_MASK | MAC_CON1_GMAC2IO_MAC_SPEED_MASK) << 16)); break; } return (0); } static void rk3328_set_phy_mode(struct if_dwc_rk_softc *sc) { switch (sc->base.phy_mode) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: SYSCON_WRITE_4(sc->grf, RK3328_GRF_MAC_CON1, ((MAC_CON1_GMAC2IO_INTF_SEL_MASK | MAC_CON1_GMAC2IO_RMII_MODE_MASK) << 16) | MAC_CON1_GMAC2IO_INTF_RGMII); break; case MII_CONTYPE_RMII: SYSCON_WRITE_4(sc->grf, sc->integrated_phy ? RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1, ((MAC_CON1_GMAC2IO_INTF_SEL_MASK | MAC_CON1_GMAC2IO_RMII_MODE_MASK) << 16) | MAC_CON1_GMAC2IO_INTF_RMII | MAC_CON1_GMAC2IO_RMII_MODE); break; } } static void rk3328_phy_powerup(struct if_dwc_rk_softc *sc) { SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON1, (MACPHY_CON1_RMII_MODE_MASK << 16) | MACPHY_CON1_RMII_MODE); } static void rk3399_set_delays(struct if_dwc_rk_softc *sc) { uint32_t reg, tx, rx; if (!mii_contype_is_rgmii(sc->base.phy_mode)) return; reg = SYSCON_READ_4(sc->grf, RK3399_GRF_SOC_CON6); tx = ((reg >> SOC_CON6_TX_DL_CFG_SHIFT) & SOC_CON6_TX_DL_CFG_MASK); rx = ((reg >> SOC_CON6_RX_DL_CFG_SHIFT) & SOC_CON6_RX_DL_CFG_MASK); if (bootverbose) { device_printf(sc->base.dev, "current delays settings: tx=%u(%s) rx=%u(%s)\n", tx, ((reg & SOC_CON6_GMAC_TXCLK_DLY_ENA) ? "enabled" : "disabled"), rx, ((reg & SOC_CON6_GMAC_RXCLK_DLY_ENA) ? 
"enabled" : "disabled")); device_printf(sc->base.dev, "setting new RK3399 RX/TX delays: %d/%d\n", sc->rx_delay, sc->tx_delay); } reg = 0xFFFF << 16; reg |= ((sc->tx_delay & SOC_CON6_TX_DL_CFG_MASK) << SOC_CON6_TX_DL_CFG_SHIFT); reg |= ((sc->rx_delay & SOC_CON6_RX_DL_CFG_MASK) << SOC_CON6_RX_DL_CFG_SHIFT); reg |= SOC_CON6_GMAC_TXCLK_DLY_ENA | SOC_CON6_GMAC_RXCLK_DLY_ENA; SYSCON_WRITE_4(sc->grf, RK3399_GRF_SOC_CON6, reg); } static int rk3399_set_speed(struct if_dwc_rk_softc *sc, int speed) { uint32_t reg; switch (speed) { case IFM_1000_T: case IFM_1000_SX: reg = SOC_CON5_GMAC_CLK_SEL_125; break; case IFM_100_TX: reg = SOC_CON5_GMAC_CLK_SEL_25; break; case IFM_10_T: reg = SOC_CON5_GMAC_CLK_SEL_2_5; break; default: device_printf(sc->base.dev, "unsupported media %u\n", speed); return (-1); } SYSCON_WRITE_4(sc->grf, RK3399_GRF_SOC_CON5, ((SOC_CON5_GMAC_CLK_SEL_MASK << 16) | reg)); return (0); } static int if_dwc_rk_sysctl_delays(SYSCTL_HANDLER_ARGS) { struct if_dwc_rk_softc *sc; int rv; uint32_t rxtx; sc = arg1; rxtx = ((sc->rx_delay << 8) | sc->tx_delay); rv = sysctl_handle_int(oidp, &rxtx, 0, req); if (rv != 0 || req->newptr == NULL) return (rv); sc->tx_delay = rxtx & 0xff; sc->rx_delay = (rxtx >> 8) & 0xff; if (sc->ops->set_delays) sc->ops->set_delays(sc); return (0); } static int if_dwc_rk_init_sysctl(struct if_dwc_rk_softc *sc) { struct sysctl_oid *child; struct sysctl_ctx_list *ctx_list; ctx_list = device_get_sysctl_ctx(sc->base.dev); child = device_get_sysctl_tree(sc->base.dev); SYSCTL_ADD_PROC(ctx_list, SYSCTL_CHILDREN(child), OID_AUTO, "delays", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, sc, 0, if_dwc_rk_sysctl_delays, "", "RGMII RX/TX delays: ((rx << 8) | tx)"); return (0); } static int if_dwc_rk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Rockchip Gigabit Ethernet Controller"); return (BUS_PROBE_DEFAULT); } static int 
if_dwc_rk_init_clocks(device_t dev) { struct if_dwc_rk_softc *sc; sc = device_get_softc(dev); /* Enable clocks */ if (clk_get_by_ofw_name(dev, 0, "mac_clk_tx", &sc->mac_clk_tx) != 0) { device_printf(sc->base.dev, "could not get mac_clk_tx clock\n"); sc->mac_clk_tx = NULL; } if (clk_get_by_ofw_name(dev, 0, "aclk_mac", &sc->aclk_mac) != 0) { device_printf(sc->base.dev, "could not get aclk_mac clock\n"); sc->aclk_mac = NULL; } if (clk_get_by_ofw_name(dev, 0, "pclk_mac", &sc->pclk_mac) != 0) { device_printf(sc->base.dev, "could not get pclk_mac clock\n"); sc->pclk_mac = NULL; } /* Optional clock */ clk_get_by_ofw_name(dev, 0, "clk_mac_speed", &sc->clk_mac_speed); if (sc->base.phy_mode == MII_CONTYPE_RMII) { if (clk_get_by_ofw_name(dev, 0, "mac_clk_rx", &sc->mac_clk_rx) != 0) { device_printf(sc->base.dev, "could not get mac_clk_rx clock\n"); sc->mac_clk_rx = NULL; } if (clk_get_by_ofw_name(dev, 0, "clk_mac_ref", &sc->clk_mac_ref) != 0) { device_printf(sc->base.dev, "could not get clk_mac_ref clock\n"); sc->clk_mac_ref = NULL; } if (!sc->clock_in) { if (clk_get_by_ofw_name(dev, 0, "clk_mac_refout", &sc->clk_mac_refout) != 0) { device_printf(sc->base.dev, "could not get clk_mac_refout clock\n"); sc->clk_mac_refout = NULL; } clk_set_freq(sc->clk_stmmaceth, 50000000, 0); } } if ((sc->phy_node != 0) && sc->integrated_phy) { if (clk_get_by_ofw_index(dev, sc->phy_node, 0, &sc->clk_phy) != 0) { device_printf(sc->base.dev, "could not get PHY clock\n"); sc->clk_phy = NULL; } if (sc->clk_phy) { clk_set_freq(sc->clk_phy, 50000000, 0); } } if (sc->base.phy_mode == MII_CONTYPE_RMII) { if (sc->mac_clk_rx) clk_enable(sc->mac_clk_rx); if (sc->clk_mac_ref) clk_enable(sc->clk_mac_ref); if (sc->clk_mac_refout) clk_enable(sc->clk_mac_refout); } if (sc->clk_phy) clk_enable(sc->clk_phy); if (sc->aclk_mac) clk_enable(sc->aclk_mac); if (sc->pclk_mac) clk_enable(sc->pclk_mac); if (sc->mac_clk_tx) clk_enable(sc->mac_clk_tx); if (sc->clk_mac_speed) clk_enable(sc->clk_mac_speed); DELAY(50); return 
(0); } static int if_dwc_rk_init(device_t dev) { struct if_dwc_rk_softc *sc; phandle_t node; uint32_t rx, tx; int err; pcell_t phy_handle; char *clock_in_out; hwreset_t phy_reset; regulator_t phy_supply; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->ops = (struct if_dwc_rk_ops *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf) != 0) { device_printf(dev, "cannot get grf driver handle\n"); return (ENXIO); } if (OF_getencprop(node, "tx_delay", &tx, sizeof(tx)) <= 0) tx = 0x30; if (OF_getencprop(node, "rx_delay", &rx, sizeof(rx)) <= 0) rx = 0x10; sc->tx_delay = tx; sc->rx_delay = rx; sc->clock_in = true; if (OF_getprop_alloc(node, "clock_in_out", (void **)&clock_in_out)) { if (strcmp(clock_in_out, "input") == 0) sc->clock_in = true; else sc->clock_in = false; OF_prop_free(clock_in_out); } if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) > 0) sc->phy_node = OF_node_from_xref(phy_handle); if (sc->phy_node) sc->integrated_phy = OF_hasprop(sc->phy_node, "phy-is-integrated"); if (sc->integrated_phy) device_printf(sc->base.dev, "PHY is integrated\n"); if_dwc_rk_init_clocks(dev); if (sc->ops->set_phy_mode) sc->ops->set_phy_mode(sc); if (sc->ops->set_delays) sc->ops->set_delays(sc); /* * this also sets delays if tunable is defined */ err = if_dwc_rk_init_sysctl(sc); if (err != 0) return (err); if (regulator_get_by_ofw_property(sc->base.dev, 0, "phy-supply", &phy_supply) == 0) { if (regulator_enable(phy_supply)) { device_printf(sc->base.dev, "cannot enable 'phy' regulator\n"); } } else device_printf(sc->base.dev, "no phy-supply property\n"); /* Power up */ if (sc->integrated_phy) { if (sc->ops->phy_powerup) sc->ops->phy_powerup(sc); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON0, (MACPHY_CON0_CLK_50M_MASK << 16) | MACPHY_CON0_CLK_50M); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON0, (MACPHY_CON0_RMII_MODE_MASK << 16) | 
MACPHY_CON0_RMII_MODE); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON2, 0xffff1234); SYSCON_WRITE_4(sc->grf, RK3328_GRF_MACPHY_CON3, 0x003f0035); if (hwreset_get_by_ofw_idx(dev, sc->phy_node, 0, &phy_reset) == 0) { hwreset_assert(phy_reset); DELAY(20); hwreset_deassert(phy_reset); DELAY(20); } } return (0); } static int if_dwc_rk_mii_clk(device_t dev) { struct if_dwc_rk_softc *sc; uint64_t freq; int rv; sc = device_get_softc(dev); if ((rv = clk_get_freq(sc->pclk_mac, &freq)) != 0) return (-rv); freq = freq / 1000 / 1000; if (freq >= 60 && freq <= 100) return (GMAC_MII_CLK_60_100M_DIV42); else if (freq >= 100 && freq <= 150) return (GMAC_MII_CLK_100_150M_DIV62); else if (freq >= 20 && freq <= 35) return (GMAC_MII_CLK_25_35M_DIV16); else if (freq >= 35 && freq <= 60) return (GMAC_MII_CLK_35_60M_DIV26); else if (freq >= 150 && freq <= 250) return (GMAC_MII_CLK_150_250M_DIV102); else if (freq >= 250 && freq <= 300) return (GMAC_MII_CLK_250_300M_DIV124); return (-ERANGE); } static int if_dwc_rk_set_speed(device_t dev, int speed) { struct if_dwc_rk_softc *sc; sc = device_get_softc(dev); if (sc->ops->set_speed) return sc->ops->set_speed(sc, speed); return (0); } static device_method_t if_dwc_rk_methods[] = { DEVMETHOD(device_probe, if_dwc_rk_probe), DEVMETHOD(if_dwc_init, if_dwc_rk_init), DEVMETHOD(if_dwc_mii_clk, if_dwc_rk_mii_clk), DEVMETHOD(if_dwc_set_speed, if_dwc_rk_set_speed), DEVMETHOD_END }; extern driver_t dwc_driver; DEFINE_CLASS_1(dwc, dwc_rk_driver, if_dwc_rk_methods, sizeof(struct if_dwc_rk_softc), dwc_driver); DRIVER_MODULE(dwc_rk, simplebus, dwc_rk_driver, 0, 0); MODULE_DEPEND(dwc_rk, dwc, 1, 1, 1); diff --git a/sys/dev/dwc/if_dwc_socfpga.c b/sys/dev/dwc/if_dwc_socfpga.c index ae3ea55564b6..ae2bcac54a95 100644 --- a/sys/dev/dwc/if_dwc_socfpga.c +++ b/sys/dev/dwc/if_dwc_socfpga.c @@ -1,107 +1,107 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Ruslan Bukin * * This software was developed by SRI International and the University of * 
Cambridge Computer Laboratory (Department of Computer Science and * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the * DARPA SSITH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "if_dwc_if.h" static int if_dwc_socfpga_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "altr,socfpga-stmmac")) return (ENXIO); device_set_desc(dev, "Altera SOCFPGA Ethernet MAC"); return (BUS_PROBE_DEFAULT); } static int if_dwc_socfpga_init(device_t dev) { return (0); } static int if_dwc_socfpga_mii_clk(device_t dev) { phandle_t root; root = OF_finddevice("/"); if (ofw_bus_node_is_compatible(root, "altr,socfpga-stratix10")) return (GMAC_MII_CLK_35_60M_DIV26); /* Default value. */ return (GMAC_MII_CLK_25_35M_DIV16); } static device_method_t dwc_socfpga_methods[] = { DEVMETHOD(device_probe, if_dwc_socfpga_probe), DEVMETHOD(if_dwc_init, if_dwc_socfpga_init), DEVMETHOD(if_dwc_mii_clk, if_dwc_socfpga_mii_clk), DEVMETHOD_END }; extern driver_t dwc_driver; DEFINE_CLASS_1(dwc, dwc_socfpga_driver, dwc_socfpga_methods, sizeof(struct dwc_softc), dwc_driver); EARLY_DRIVER_MODULE(dwc_socfpga, simplebus, dwc_socfpga_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(dwc_socfpga, dwc, 1, 1, 1); diff --git a/sys/dev/dwwdt/dwwdt.c b/sys/dev/dwwdt/dwwdt.c index e1787bb549ca..89f94fff9bad 100644 --- a/sys/dev/dwwdt/dwwdt.c +++ b/sys/dev/dwwdt/dwwdt.c @@ -1,373 +1,373 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 BusyTech * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include /* Registers */ #define DWWDT_CR 0x00 #define DWWDT_CR_WDT_EN (1 << 0) #define DWWDT_CR_RESP_MODE (1 << 1) #define DWWDT_TORR 0x04 #define DWWDT_CCVR 0x08 #define DWWDT_CRR 0x0C #define DWWDT_CRR_KICK 0x76 #define DWWDT_STAT 0x10 #define DWWDT_STAT_STATUS 0x01 #define DWWDT_EOI 0x14 #define DWWDT_READ4(sc, reg) bus_read_4((sc)->sc_mem_res, (reg)) #define DWWDT_WRITE4(sc, reg, val) \ bus_write_4((sc)->sc_mem_res, (reg), (val)) /* * 47 = 16 (timeout shift of dwwdt) + 30 (1s ~= 2 ** 30ns) + 1 * (pre-restart delay) */ #define DWWDT_EXP_OFFSET 47 struct dwwdt_softc { device_t sc_dev; struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_intr_cookie; clk_t sc_clk; uint64_t sc_clk_freq; eventhandler_tag sc_evtag; int sc_mem_rid; int sc_irq_rid; enum { DWWDT_STOPPED, DWWDT_RUNNING, } sc_status; }; static struct ofw_compat_data compat_data[] = { { "snps,dw-wdt", 1 }, { NULL, 0 } }; SYSCTL_NODE(_dev, OID_AUTO, dwwdt, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Synopsys Designware watchdog timer"); /* Setting this to true disables full restart mode. 
*/ static bool dwwdt_prevent_restart = false; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, prevent_restart, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_prevent_restart, 0, "Disable system reset on timeout"); static bool dwwdt_debug_enabled = false; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_debug_enabled, 0, "Enable debug mode"); static bool dwwdt_panic_first = true; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, panic_first, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_panic_first, 0, "Try to panic on timeout, reset on another timeout"); static int dwwdt_probe(device_t); static int dwwdt_attach(device_t); static int dwwdt_detach(device_t); static int dwwdt_shutdown(device_t); static void dwwdt_intr(void *); static void dwwdt_event(void *, unsigned int, int *); /* Helpers */ static inline void dwwdt_start(struct dwwdt_softc *sc); static inline bool dwwdt_started(const struct dwwdt_softc *sc); static inline void dwwdt_stop(struct dwwdt_softc *sc); static inline void dwwdt_set_timeout(const struct dwwdt_softc *sc, int val); static void dwwdt_debug(device_t); static void dwwdt_debug(device_t dev) { /* * Reading from EOI may clear interrupt flag. */ const struct dwwdt_softc *sc = device_get_softc(dev); device_printf(dev, "Registers dump: \n"); device_printf(dev, " CR: %08x\n", DWWDT_READ4(sc, DWWDT_CR)); device_printf(dev, " CCVR: %08x\n", DWWDT_READ4(sc, DWWDT_CCVR)); device_printf(dev, " CRR: %08x\n", DWWDT_READ4(sc, DWWDT_CRR)); device_printf(dev, " STAT: %08x\n", DWWDT_READ4(sc, DWWDT_STAT)); device_printf(dev, "Clock: %s\n", clk_get_name(sc->sc_clk)); device_printf(dev, " FREQ: %lu\n", sc->sc_clk_freq); } static inline bool dwwdt_started(const struct dwwdt_softc *sc) { /* CR_WDT_E bit can be clear only by full CPU reset. 
*/ return ((DWWDT_READ4(sc, DWWDT_CR) & DWWDT_CR_WDT_EN) != 0); } static void inline dwwdt_start(struct dwwdt_softc *sc) { uint32_t val; /* Enable watchdog */ val = DWWDT_READ4(sc, DWWDT_CR); val |= DWWDT_CR_WDT_EN | DWWDT_CR_RESP_MODE; DWWDT_WRITE4(sc, DWWDT_CR, val); sc->sc_status = DWWDT_RUNNING; } static void inline dwwdt_stop(struct dwwdt_softc *sc) { sc->sc_status = DWWDT_STOPPED; dwwdt_set_timeout(sc, 0x0f); } static void inline dwwdt_set_timeout(const struct dwwdt_softc *sc, int val) { DWWDT_WRITE4(sc, DWWDT_TORR, val); DWWDT_WRITE4(sc, DWWDT_CRR, DWWDT_CRR_KICK); } static void dwwdt_intr(void *arg) { struct dwwdt_softc *sc = arg; KASSERT((DWWDT_READ4(sc, DWWDT_STAT) & DWWDT_STAT_STATUS) != 0, ("Missing interrupt status bit?")); if (dwwdt_prevent_restart || sc->sc_status == DWWDT_STOPPED) { /* * Confirm interrupt reception. Restart counter. * This also emulates stopping watchdog. */ (void)DWWDT_READ4(sc, DWWDT_EOI); return; } if (dwwdt_panic_first) panic("dwwdt pre-timeout interrupt"); } static void dwwdt_event(void *arg, unsigned int cmd, int *error) { struct dwwdt_softc *sc = arg; const int exponent = flsl(sc->sc_clk_freq); int timeout; int val; timeout = cmd & WD_INTERVAL; val = MAX(0, timeout + exponent - DWWDT_EXP_OFFSET + 1); dwwdt_stop(sc); if (cmd == 0 || val > 0x0f) { /* * Set maximum time between interrupts and Leave watchdog * disabled. 
*/ return; } dwwdt_set_timeout(sc, val); dwwdt_start(sc); *error = 0; if (dwwdt_debug_enabled) dwwdt_debug(sc->sc_dev); } static int dwwdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Synopsys Designware watchdog timer"); return (BUS_PROBE_DEFAULT); } static int dwwdt_attach(device_t dev) { struct dwwdt_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "cannot allocate memory resource\n"); goto err_no_mem; } sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE); if (sc->sc_irq_res == NULL) { device_printf(dev, "cannot allocate ireq resource\n"); goto err_no_irq; } sc->sc_intr_cookie = NULL; if (bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, dwwdt_intr, sc, &sc->sc_intr_cookie) != 0) { device_printf(dev, "cannot setup interrupt routine\n"); goto err_no_intr; } if (clk_get_by_ofw_index(dev, 0, 0, &sc->sc_clk) != 0) { device_printf(dev, "cannot find clock\n"); goto err_no_clock; } if (clk_enable(sc->sc_clk) != 0) { device_printf(dev, "cannot enable clock\n"); goto err_no_freq; } if (clk_get_freq(sc->sc_clk, &sc->sc_clk_freq) != 0) { device_printf(dev, "cannot get clock frequency\n"); goto err_no_freq; } if (sc->sc_clk_freq == 0UL) goto err_no_freq; sc->sc_evtag = EVENTHANDLER_REGISTER(watchdog_list, dwwdt_event, sc, 0); sc->sc_status = DWWDT_STOPPED; return (bus_generic_attach(dev)); err_no_freq: clk_release(sc->sc_clk); err_no_clock: bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_cookie); err_no_intr: bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); err_no_irq: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); err_no_mem: return (ENXIO); } static int 
dwwdt_detach(device_t dev)
{
	struct dwwdt_softc *sc = device_get_softc(dev);

	if (dwwdt_started(sc)) {
		/*
		 * Once started it cannot be stopped. Prevent module unload
		 * instead.
		 */
		return (EBUSY);
	}

	/* Deregister from watchdog(4) before tearing anything down. */
	EVENTHANDLER_DEREGISTER(watchdog_list, sc->sc_evtag);
	sc->sc_evtag = NULL;

	if (sc->sc_clk != NULL)
		clk_release(sc->sc_clk);

	if (sc->sc_intr_cookie != NULL)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_cookie);

	if (sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}

	if (sc->sc_mem_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	return (bus_generic_detach(dev));
}

/*
 * Shutdown: suppress the reset path so a pre-timeout interrupt during
 * system shutdown only acknowledges the hardware, then park the counter.
 */
static int
dwwdt_shutdown(device_t dev)
{
	struct dwwdt_softc *sc;

	sc = device_get_softc(dev);

	/* Prevent restarts during shutdown. */
	dwwdt_prevent_restart = true;
	dwwdt_stop(sc);

	return (bus_generic_shutdown(dev));
}

static device_method_t dwwdt_methods[] = {
	DEVMETHOD(device_probe, dwwdt_probe),
	DEVMETHOD(device_attach, dwwdt_attach),
	DEVMETHOD(device_detach, dwwdt_detach),
	DEVMETHOD(device_shutdown, dwwdt_shutdown),
	{0, 0}
};

static driver_t dwwdt_driver = {
	"dwwdt",
	dwwdt_methods,
	sizeof(struct dwwdt_softc),
};

DRIVER_MODULE(dwwdt, simplebus, dwwdt_driver, NULL, NULL);
MODULE_VERSION(dwwdt, 1);
OFWBUS_PNP_INFO(compat_data);
diff --git a/sys/dev/eqos/if_eqos.c b/sys/dev/eqos/if_eqos.c
index 75e3ca957b48..17c820a1b191 100644
--- a/sys/dev/eqos/if_eqos.c
+++ b/sys/dev/eqos/if_eqos.c
@@ -1,1291 +1,1291 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Soren Schmidt
 * Copyright (c) 2022 Jared McNeill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $ */ /* * DesignWare Ethernet Quality-of-Service controller */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include "if_eqos_if.h" #ifdef FDT #include #include #include -#include +#include #endif #include #include #define DESC_BOUNDARY (1ULL << 32) #define DESC_ALIGN sizeof(struct eqos_dma_desc) #define DESC_OFFSET(n) ((n) * sizeof(struct eqos_dma_desc)) #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN) #define TX_MAX_SEGS (TX_DESC_COUNT / 2) #define TX_NEXT(n) (((n) + 1 ) % TX_DESC_COUNT) #define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT) #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN) #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT) #define MII_BUSY_RETRY 1000 #define WATCHDOG_TIMEOUT_SECS 3 #define 
EQOS_LOCK(sc) mtx_lock(&(sc)->lock) #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock) #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED) #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o)) #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v)) static struct resource_spec eqos_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static void eqos_tick(void *softc); static int eqos_miibus_readreg(device_t dev, int phy, int reg) { struct eqos_softc *sc = device_get_softc(dev); uint32_t addr; int retry, val; addr = sc->csr_clock_range | (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB; WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); DELAY(100); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB)) { val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF; break; } DELAY(10); } if (!retry) { device_printf(dev, "phy read timeout, phy=%d reg=%d\n", phy, reg); return (ETIMEDOUT); } return (val); } static int eqos_miibus_writereg(device_t dev, int phy, int reg, int val) { struct eqos_softc *sc = device_get_softc(dev); uint32_t addr; int retry; WR4(sc, GMAC_MAC_MDIO_DATA, val); addr = sc->csr_clock_range | (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB; WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); DELAY(100); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB)) break; DELAY(10); } if (!retry) { device_printf(dev, "phy write timeout, phy=%d reg=%d\n", phy, reg); return (ETIMEDOUT); } return (0); } static void eqos_miibus_statchg(device_t dev) { struct eqos_softc *sc = device_get_softc(dev); struct mii_data *mii = device_get_softc(sc->miibus); uint32_t reg; EQOS_ASSERT_LOCKED(sc); if 
(mii->mii_media_status & IFM_ACTIVE) sc->link_up = true; else sc->link_up = false; reg = RD4(sc, GMAC_MAC_CONFIGURATION); switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: reg |= GMAC_MAC_CONFIGURATION_PS; reg &= ~GMAC_MAC_CONFIGURATION_FES; break; case IFM_100_TX: reg |= GMAC_MAC_CONFIGURATION_PS; reg |= GMAC_MAC_CONFIGURATION_FES; break; case IFM_1000_T: case IFM_1000_SX: reg &= ~GMAC_MAC_CONFIGURATION_PS; reg &= ~GMAC_MAC_CONFIGURATION_FES; break; case IFM_2500_T: case IFM_2500_SX: reg &= ~GMAC_MAC_CONFIGURATION_PS; reg |= GMAC_MAC_CONFIGURATION_FES; break; default: sc->link_up = false; return; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)) reg |= GMAC_MAC_CONFIGURATION_DM; else reg &= ~GMAC_MAC_CONFIGURATION_DM; WR4(sc, GMAC_MAC_CONFIGURATION, reg); IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active)); WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); } static void eqos_media_status(if_t ifp, struct ifmediareq *ifmr) { struct eqos_softc *sc = if_getsoftc(ifp); struct mii_data *mii = device_get_softc(sc->miibus); EQOS_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; EQOS_UNLOCK(sc); } static int eqos_media_change(if_t ifp) { struct eqos_softc *sc = if_getsoftc(ifp); int error; EQOS_LOCK(sc); error = mii_mediachg(device_get_softc(sc->miibus)); EQOS_UNLOCK(sc); return (error); } static void eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags, bus_addr_t paddr, u_int len, u_int total_len) { uint32_t tdes2, tdes3; if (!paddr || !len) { tdes2 = 0; tdes3 = flags; } else { tdes2 = (flags & EQOS_TDES3_LD) ? 
EQOS_TDES2_IOC : 0; tdes3 = flags; } bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr); sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); sc->tx.desc_ring[index].des2 = htole32(tdes2 | len); sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len); } static int eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m) { bus_dma_segment_t segs[TX_MAX_SEGS]; int first = sc->tx.head; int error, nsegs, idx; uint32_t flags; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, sc->tx.buf_map[first].map, m, segs, &nsegs, 0); if (error == EFBIG) { struct mbuf *mb; device_printf(sc->dev, "TX packet too big trying defrag\n"); bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); if (!(mb = m_defrag(m, M_NOWAIT))) return (ENOMEM); m = mb; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, sc->tx.buf_map[first].map, m, segs, &nsegs, 0); } if (error) return (ENOMEM); if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) { bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); device_printf(sc->dev, "TX packet no more queue space\n"); return (ENOMEM); } bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map, BUS_DMASYNC_PREWRITE); sc->tx.buf_map[first].mbuf = m; for (flags = EQOS_TDES3_FD, idx = 0; idx < nsegs; idx++) { if (idx == (nsegs - 1)) flags |= EQOS_TDES3_LD; eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr, segs[idx].ds_len, m->m_pkthdr.len); flags &= ~EQOS_TDES3_FD; flags |= EQOS_TDES3_OWN; sc->tx.head = TX_NEXT(sc->tx.head); } /* * Defer setting OWN bit on the first descriptor * until all descriptors have been updated */ bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN); return (0); } static void eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr) { sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr); sc->rx.desc_ring[index].des1 = 
htole32((uint32_t)(paddr >> 32)); sc->rx.desc_ring[index].des2 = htole32(0); bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC | EQOS_RDES3_BUF1V); } static int eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) { struct bus_dma_segment seg; int error, nsegs; m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_map[index].map, m, &seg, &nsegs, 0); if (error) return (error); bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, BUS_DMASYNC_PREREAD); sc->rx.buf_map[index].mbuf = m; eqos_setup_rxdesc(sc, index, seg.ds_addr); return (0); } static struct mbuf * eqos_alloc_mbufcl(struct eqos_softc *sc) { struct mbuf *m; if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR))) m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; return (m); } static void eqos_enable_intr(struct eqos_softc *sc) { WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, GMAC_DMA_CHAN0_INTR_ENABLE_NIE | GMAC_DMA_CHAN0_INTR_ENABLE_AIE | GMAC_DMA_CHAN0_INTR_ENABLE_FBE | GMAC_DMA_CHAN0_INTR_ENABLE_RIE | GMAC_DMA_CHAN0_INTR_ENABLE_TIE); } static void eqos_disable_intr(struct eqos_softc *sc) { WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0); } static uint32_t eqos_bitrev32(uint32_t x) { x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); return ((x >> 16) | (x << 16)); } static u_int eqos_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); crc &= 0x7f; crc = eqos_bitrev32(~crc) >> 26; hash[crc >> 5] |= 1 << (crc & 0x1f); return (1); } static void eqos_setup_rxfilter(struct eqos_softc *sc) { if_t ifp = sc->ifp; uint32_t pfil, hash[2]; const uint8_t *eaddr; uint32_t val; EQOS_ASSERT_LOCKED(sc); pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); pfil &= 
~(GMAC_MAC_PACKET_FILTER_PR | GMAC_MAC_PACKET_FILTER_PM | GMAC_MAC_PACKET_FILTER_HMC | GMAC_MAC_PACKET_FILTER_PCF_MASK); hash[0] = hash[1] = 0xffffffff; if ((if_getflags(ifp) & IFF_PROMISC)) { pfil |= GMAC_MAC_PACKET_FILTER_PR | GMAC_MAC_PACKET_FILTER_PCF_ALL; } else if ((if_getflags(ifp) & IFF_ALLMULTI)) { pfil |= GMAC_MAC_PACKET_FILTER_PM; } else { hash[0] = hash[1] = 0; pfil |= GMAC_MAC_PACKET_FILTER_HMC; if_foreach_llmaddr(ifp, eqos_hash_maddr, hash); } /* Write our unicast address */ eaddr = if_getlladdr(ifp); val = eaddr[4] | (eaddr[5] << 8); WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | (eaddr[3] << 24); WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); /* Multicast hash filters */ WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[1]); WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[0]); /* Packet filter config */ WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); } static int eqos_reset(struct eqos_softc *sc) { uint32_t val; int retry; WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); for (retry = 2000; retry > 0; retry--) { DELAY(1000); val = RD4(sc, GMAC_DMA_MODE); if (!(val & GMAC_DMA_MODE_SWR)) return (0); } return (ETIMEDOUT); } static void eqos_init_rings(struct eqos_softc *sc) { WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, (uint32_t)(sc->tx.desc_ring_paddr >> 32)); WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, (uint32_t)sc->tx.desc_ring_paddr); WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, (uint32_t)(sc->rx.desc_ring_paddr >> 32)); WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, (uint32_t)sc->rx.desc_ring_paddr); WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT)); } static void eqos_init(void *if_softc) { struct eqos_softc *sc = if_softc; if_t ifp = sc->ifp; struct mii_data *mii = device_get_softc(sc->miibus); uint32_t val; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; EQOS_LOCK(sc); eqos_init_rings(sc); 
eqos_setup_rxfilter(sc); WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); /* Enable transmit and receive DMA */ val = RD4(sc, GMAC_DMA_CHAN0_CONTROL); val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK; val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT; val |= GMAC_DMA_CHAN0_CONTROL_PBLX8; WR4(sc, GMAC_DMA_CHAN0_CONTROL, val); val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP; val |= GMAC_DMA_CHAN0_TX_CONTROL_START; WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK; val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT); val |= GMAC_DMA_CHAN0_RX_CONTROL_START; WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); /* Disable counters */ WR4(sc, GMAC_MMC_CONTROL, GMAC_MMC_CONTROL_CNTFREEZ | GMAC_MMC_CONTROL_CNTPRST | GMAC_MMC_CONTROL_CNTPRSTLVL); /* Configure operation modes */ WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, GMAC_MTL_TXQ0_OPERATION_MODE_TSF | GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN); WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, GMAC_MTL_RXQ0_OPERATION_MODE_RSF | GMAC_MTL_RXQ0_OPERATION_MODE_FEP | GMAC_MTL_RXQ0_OPERATION_MODE_FUP); /* Enable flow control */ val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL); val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT; val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE; WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val); val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL); val |= GMAC_MAC_RX_FLOW_CTRL_RFE; WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val); /* set RX queue mode. must be in DCB mode. 
*/ WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) | GMAC_RXQ_CTRL0_EN_DCB); /* Enable transmitter and receiver */ val = RD4(sc, GMAC_MAC_CONFIGURATION); val |= GMAC_MAC_CONFIGURATION_BE; val |= GMAC_MAC_CONFIGURATION_JD; val |= GMAC_MAC_CONFIGURATION_JE; val |= GMAC_MAC_CONFIGURATION_DCRS; val |= GMAC_MAC_CONFIGURATION_TE; val |= GMAC_MAC_CONFIGURATION_RE; WR4(sc, GMAC_MAC_CONFIGURATION, val); eqos_enable_intr(sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); mii_mediachg(mii); callout_reset(&sc->callout, hz, eqos_tick, sc); EQOS_UNLOCK(sc); } static void eqos_start_locked(if_t ifp) { struct eqos_softc *sc = if_getsoftc(ifp); struct mbuf *m; int pending = 0; if (!sc->link_up) return; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; while (true) { if (TX_QUEUED(sc->tx.head, sc->tx.tail) >= TX_DESC_COUNT - TX_MAX_SEGS) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } if (!(m = if_dequeue(ifp))) break; if (eqos_setup_txbuf(sc, m)) { if_sendq_prepend(ifp, m); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } bpf_mtap_if(ifp, m); pending++; } if (pending) { bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Start and run TX DMA */ WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR, (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head)); sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS; } } static void eqos_start(if_t ifp) { struct eqos_softc *sc = if_getsoftc(ifp); EQOS_LOCK(sc); eqos_start_locked(ifp); EQOS_UNLOCK(sc); } static void eqos_stop(struct eqos_softc *sc) { if_t ifp = sc->ifp; uint32_t val; int retry; EQOS_LOCK(sc); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->callout); /* Disable receiver */ val = RD4(sc, GMAC_MAC_CONFIGURATION); val &= ~GMAC_MAC_CONFIGURATION_RE; WR4(sc, GMAC_MAC_CONFIGURATION, val); /* Stop receive DMA */ val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START; WR4(sc, 
GMAC_DMA_CHAN0_RX_CONTROL, val); /* Stop transmit DMA */ val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START; WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); /* Flush data in the TX FIFO */ val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ; WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); for (retry = 10000; retry > 0; retry--) { val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); if (!(val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ)) break; DELAY(10); } if (!retry) device_printf(sc->dev, "timeout flushing TX queue\n"); /* Disable transmitter */ val = RD4(sc, GMAC_MAC_CONFIGURATION); val &= ~GMAC_MAC_CONFIGURATION_TE; WR4(sc, GMAC_MAC_CONFIGURATION, val); eqos_disable_intr(sc); EQOS_UNLOCK(sc); } static void eqos_rxintr(struct eqos_softc *sc) { if_t ifp = sc->ifp; struct mbuf *m; uint32_t rdes3; int error, length; while (true) { rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3); if ((rdes3 & EQOS_RDES3_OWN)) break; if (rdes3 & (EQOS_RDES3_OE | EQOS_RDES3_RE)) printf("Receive errer rdes3=%08x\n", rdes3); bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[sc->rx.head].map); length = rdes3 & EQOS_RDES3_LENGTH_MASK; if (length) { m = sc->rx.buf_map[sc->rx.head].mbuf; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = length; m->m_len = length; m->m_nextpkt = NULL; /* Remove trailing FCS */ m_adj(m, -ETHER_CRC_LEN); EQOS_UNLOCK(sc); if_input(ifp, m); EQOS_LOCK(sc); } if ((m = eqos_alloc_mbufcl(sc))) { if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m))) printf("ERROR: Hole in RX ring!!\n"); } else if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head)); sc->rx.head = RX_NEXT(sc->rx.head); } } static void eqos_txintr(struct eqos_softc *sc) { if_t ifp = sc->ifp; struct eqos_bufmap *bmap; uint32_t tdes3; EQOS_ASSERT_LOCKED(sc); 
while (sc->tx.tail != sc->tx.head) { tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3); if ((tdes3 & EQOS_TDES3_OWN)) break; bmap = &sc->tx.buf_map[sc->tx.tail]; if (bmap->mbuf) { bus_dmamap_sync(sc->tx.buf_tag, bmap->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx.buf_tag, bmap->map); m_freem(bmap->mbuf); bmap->mbuf = NULL; } eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); /* Last descriptor in a packet contains DMA status */ if ((tdes3 & EQOS_TDES3_LD)) { if ((tdes3 & EQOS_TDES3_DE)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if ((tdes3 & EQOS_TDES3_ES)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } } sc->tx.tail = TX_NEXT(sc->tx.tail); } if (sc->tx.tail == sc->tx.head) sc->tx_watchdog = 0; eqos_start_locked(sc->ifp); } static void eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status) { uint32_t mtl_istat = 0; if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS)) { uint32_t mtl_clear = 0; mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS); if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS)) { mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS; } if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS)) { mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS; } if (mtl_clear) { mtl_clear |= (mtl_istat & (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE | GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE)); WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear); } } if (bootverbose) device_printf(sc->dev, "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, " "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n", mtl_status, mtl_istat); } static void eqos_tick(void *softc) { struct eqos_softc *sc = softc; struct mii_data *mii = device_get_softc(sc->miibus); bool link_status; EQOS_ASSERT_LOCKED(sc); if (sc->tx_watchdog > 0) if (!--sc->tx_watchdog) { device_printf(sc->dev, "watchdog timeout\n"); eqos_txintr(sc); } link_status = sc->link_up; mii_tick(mii); if (sc->link_up && 
!link_status) eqos_start_locked(sc->ifp); callout_reset(&sc->callout, hz, eqos_tick, sc); } static void eqos_intr(void *arg) { struct eqos_softc *sc = arg; uint32_t mac_status, mtl_status, dma_status, rx_tx_status; mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS); mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE); if (mac_status) device_printf(sc->dev, "MAC interrupt\n"); if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS))) eqos_intr_mtl(sc, mtl_status); dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS); dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE); if (dma_status) WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status); EQOS_LOCK(sc); if (dma_status & GMAC_DMA_CHAN0_STATUS_RI) eqos_rxintr(sc); if (dma_status & GMAC_DMA_CHAN0_STATUS_TI) eqos_txintr(sc); EQOS_UNLOCK(sc); if (!(mac_status | mtl_status | dma_status)) { device_printf(sc->dev, "spurious interrupt mac=%08x mtl=%08x dma=%08x\n", RD4(sc, GMAC_MAC_INTERRUPT_STATUS), RD4(sc, GMAC_MTL_INTERRUPT_STATUS), RD4(sc, GMAC_DMA_CHAN0_STATUS)); } if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS))) device_printf(sc->dev, "RX/TX status interrupt\n"); } static int eqos_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct eqos_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int flags, mask; int error = 0; switch (cmd) { case SIOCSIFFLAGS: if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp); if ((flags & (IFF_PROMISC|IFF_ALLMULTI))) { EQOS_LOCK(sc); eqos_setup_rxfilter(sc); EQOS_UNLOCK(sc); } } else { eqos_init(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) eqos_stop(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { EQOS_LOCK(sc); eqos_setup_rxfilter(sc); EQOS_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); if 
(mask & IFCAP_VLAN_MTU) if_togglecapenable(ifp, IFCAP_VLAN_MTU); if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM)) if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0); else if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) { uint32_t maclo, machi; maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW)); machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF); /* if no valid MAC address generate random */ if (maclo == 0xffffffff && machi == 0xffff) { maclo = 0xf2 | (arc4random() & 0xffff0000); machi = arc4random() & 0x0000ffff; } eaddr[0] = maclo & 0xff; eaddr[1] = (maclo >> 8) & 0xff; eaddr[2] = (maclo >> 16) & 0xff; eaddr[3] = (maclo >> 24) & 0xff; eaddr[4] = machi & 0xff; eaddr[5] = (machi >> 8) & 0xff; } static void eqos_axi_configure(struct eqos_softc *sc) { uint32_t val; val = RD4(sc, GMAC_DMA_SYSBUS_MODE); /* Max Write Outstanding Req Limit */ val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK; val |= 0x03 << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT; /* Max Read Outstanding Req Limit */ val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK; val |= 0x07 << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT; /* Allowed Burst Length's */ val |= GMAC_DMA_SYSBUS_MODE_BLEN16; val |= GMAC_DMA_SYSBUS_MODE_BLEN8; val |= GMAC_DMA_SYSBUS_MODE_BLEN4; /* Fixed Burst Length */ val |= GMAC_DMA_SYSBUS_MODE_MB; WR4(sc, GMAC_DMA_SYSBUS_MODE, val); } static void eqos_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (!error) *(bus_addr_t *)arg = segs[0].ds_addr; } static int eqos_setup_dma(struct eqos_softc *sc) { struct mbuf *m; int error, i; /* Set up TX descriptor ring, descriptors, and dma maps */ if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), DESC_ALIGN, DESC_BOUNDARY, BUS_SPACE_MAXADDR_32BIT, 
BUS_SPACE_MAXADDR, NULL, NULL, TX_DESC_SIZE, 1, TX_DESC_SIZE, 0, NULL, NULL, &sc->tx.desc_tag))) { device_printf(sc->dev, "could not create TX ring DMA tag\n"); return (error); } if ((error = bus_dmamem_alloc(sc->tx.desc_tag, (void**)&sc->tx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map))) { device_printf(sc->dev, "could not allocate TX descriptor ring.\n"); return (error); } if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, sc->tx.desc_ring, TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) { device_printf(sc->dev, "could not load TX descriptor ring map.\n"); return (error); } if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES*TX_MAX_SEGS, TX_MAX_SEGS, MCLBYTES, 0, NULL, NULL, &sc->tx.buf_tag))) { device_printf(sc->dev, "could not create TX buffer DMA tag.\n"); return (error); } for (i = 0; i < TX_DESC_COUNT; i++) { if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT, &sc->tx.buf_map[i].map))) { device_printf(sc->dev, "cannot create TX buffer map\n"); return (error); } eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0); } /* Set up RX descriptor ring, descriptors, dma maps, and mbufs */ if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), DESC_ALIGN, DESC_BOUNDARY, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RX_DESC_SIZE, 1, RX_DESC_SIZE, 0, NULL, NULL, &sc->rx.desc_tag))) { device_printf(sc->dev, "could not create RX ring DMA tag.\n"); return (error); } if ((error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map))) { device_printf(sc->dev, "could not allocate RX descriptor ring.\n"); return (error); } if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr, &sc->rx.desc_ring_paddr, 0))) { device_printf(sc->dev, "could not load RX descriptor ring map.\n"); return (error); 
} if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rx.buf_tag))) { device_printf(sc->dev, "could not create RX buf DMA tag.\n"); return (error); } for (i = 0; i < RX_DESC_COUNT; i++) { if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT, &sc->rx.buf_map[i].map))) { device_printf(sc->dev, "cannot create RX buffer map\n"); return (error); } if (!(m = eqos_alloc_mbufcl(sc))) { device_printf(sc->dev, "cannot allocate RX mbuf\n"); return (ENOMEM); } if ((error = eqos_setup_rxbuf(sc, i, m))) { device_printf(sc->dev, "cannot create RX buffer\n"); return (error); } } if (bootverbose) device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n", sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr); return (0); } static int eqos_attach(device_t dev) { struct eqos_softc *sc = device_get_softc(dev); if_t ifp; uint32_t ver; uint8_t eaddr[ETHER_ADDR_LEN]; u_int userver, snpsver; int error; int n; /* setup resources */ if (bus_alloc_resources(dev, eqos_spec, sc->res)) { device_printf(dev, "Could not allocate resources\n"); bus_release_resources(dev, eqos_spec, sc->res); return (ENXIO); } if ((error = IF_EQOS_INIT(dev))) return (error); sc->dev = dev; ver = RD4(sc, GMAC_MAC_VERSION); userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >> GMAC_MAC_VERSION_USERVER_SHIFT; snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK; if (snpsver != 0x51) { device_printf(dev, "EQOS version 0x%02x not supported\n", snpsver); return (ENXIO); } for (n = 0; n < 4; n++) sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); if (bootverbose) { device_printf(dev, "DesignWare EQOS ver 0x%02x (0x%02x)\n", snpsver, userver); device_printf(dev, "hw features %08x %08x %08x %08x\n", sc->hw_feature[0], sc->hw_feature[1], sc->hw_feature[2], sc->hw_feature[3]); } mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->callout, &sc->lock, 0); eqos_get_eaddr(sc, eaddr); if 
(bootverbose) device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":"); /* Soft reset EMAC core */ if ((error = eqos_reset(sc))) { device_printf(sc->dev, "reset timeout!\n"); return (error); } /* Configure AXI Bus mode parameters */ eqos_axi_configure(sc); /* Setup DMA descriptors */ if (eqos_setup_dma(sc)) { device_printf(sc->dev, "failed to setup DMA descriptors\n"); return (EINVAL); } /* setup interrupt delivery */ if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS, NULL, eqos_intr, sc, &sc->irq_handle))) { device_printf(dev, "unable to setup 1st interrupt\n"); bus_release_resources(dev, eqos_spec, sc->res); return (ENXIO); } /* Setup ethernet interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(ifp, eqos_start); if_setioctlfn(ifp, eqos_ioctl); if_setinitfn(ifp, eqos_init); if_setsendqlen(ifp, TX_DESC_COUNT - 1); if_setsendqready(ifp); if_setcapabilities(ifp, IFCAP_VLAN_MTU /*| IFCAP_HWCSUM*/); if_setcapenable(ifp, if_getcapabilities(ifp)); /* Attach MII driver */ if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change, eqos_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0))) { device_printf(sc->dev, "PHY attach failed\n"); return (ENXIO); } /* Attach ethernet interface */ ether_ifattach(ifp, eaddr); return (0); } static int eqos_detach(device_t dev) { struct eqos_softc *sc = device_get_softc(dev); int i; if (device_is_attached(dev)) { EQOS_LOCK(sc); eqos_stop(sc); EQOS_UNLOCK(sc); if_setflagbits(sc->ifp, 0, IFF_UP); ether_ifdetach(sc->ifp); } if (sc->miibus) device_delete_child(dev, sc->miibus); bus_generic_detach(dev); if (sc->irq_handle) bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0], sc->irq_handle); if (sc->ifp) if_free(sc->ifp); bus_release_resources(dev, eqos_spec, sc->res); if (sc->tx.desc_tag) { if (sc->tx.desc_map) { 
bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map); bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring, sc->tx.desc_map); } bus_dma_tag_destroy(sc->tx.desc_tag); } if (sc->tx.buf_tag) { for (i = 0; i < TX_DESC_COUNT; i++) { m_free(sc->tx.buf_map[i].mbuf); bus_dmamap_destroy(sc->tx.buf_tag, sc->tx.buf_map[i].map); } bus_dma_tag_destroy(sc->tx.buf_tag); } if (sc->rx.desc_tag) { if (sc->rx.desc_map) { bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map); bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring, sc->rx.desc_map); } bus_dma_tag_destroy(sc->rx.desc_tag); } if (sc->rx.buf_tag) { for (i = 0; i < RX_DESC_COUNT; i++) { m_free(sc->rx.buf_map[i].mbuf); bus_dmamap_destroy(sc->rx.buf_tag, sc->rx.buf_map[i].map); } bus_dma_tag_destroy(sc->rx.buf_tag); } mtx_destroy(&sc->lock); return (0); } static device_method_t eqos_methods[] = { /* Device Interface */ DEVMETHOD(device_attach, eqos_attach), DEVMETHOD(device_detach, eqos_detach), /* MII Interface */ DEVMETHOD(miibus_readreg, eqos_miibus_readreg), DEVMETHOD(miibus_writereg, eqos_miibus_writereg), DEVMETHOD(miibus_statchg, eqos_miibus_statchg), DEVMETHOD_END }; driver_t eqos_driver = { "eqos", eqos_methods, sizeof(struct eqos_softc), }; DRIVER_MODULE(miibus, eqos, miibus_driver, 0, 0); diff --git a/sys/dev/eqos/if_eqos_fdt.c b/sys/dev/eqos/if_eqos_fdt.c index 68fcbc1ea706..9c36f658bad1 100644 --- a/sys/dev/eqos/if_eqos_fdt.c +++ b/sys/dev/eqos/if_eqos_fdt.c @@ -1,307 +1,307 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Soren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: eqos_fdt.c 1049 2022-12-03 14:25:46Z sos $ */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include "if_eqos_if.h" #include "syscon_if.h" #include "gpio_if.h" #include "rk_otp_if.h" #define RK356XGMAC0 0xfe2a0000 #define RK356XGMAC1 0xfe010000 #define RK3588GMAC0 0xfe1b0000 #define RK3588GMAC1 0xfe1c0000 #define EQOS_GRF_GMAC0 0x0380 #define EQOS_GRF_GMAC1 0x0388 #define EQOS_CON0_OFFSET 0 #define EQOS_CON1_OFFSET 4 #define EQOS_GMAC_PHY_INTF_SEL_RGMII 0x00fc0010 #define EQOS_GMAC_PHY_INTF_SEL_RMII 0x00fc0040 #define EQOS_GMAC_RXCLK_DLY_ENABLE 0x00020002 #define EQOS_GMAC_RXCLK_DLY_DISABLE 0x00020000 #define EQOS_GMAC_TXCLK_DLY_ENABLE 0x00010001 #define EQOS_GMAC_TXCLK_DLY_DISABLE 0x00010000 #define EQOS_GMAC_CLK_RX_DL_CFG(val) (0x7f000000 | val << 8) #define EQOS_GMAC_CLK_TX_DL_CFG(val) (0x007f0000 | val) #define 
WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v)) static const struct ofw_compat_data compat_data[] = { {"snps,dwmac-4.20a", 1}, { NULL, 0 } }; static int eqos_phy_reset(device_t dev) { pcell_t gpio_prop[4]; pcell_t delay_prop[3]; phandle_t node, gpio_node; device_t gpio; uint32_t pin, flags; uint32_t pin_value; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "snps,reset-gpio", gpio_prop, sizeof(gpio_prop)) <= 0) return (0); if (OF_getencprop(node, "snps,reset-delays-us", delay_prop, sizeof(delay_prop)) <= 0) { device_printf(dev, "Wrong property for snps,reset-delays-us"); return (ENXIO); } gpio_node = OF_node_from_xref(gpio_prop[0]); if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) { device_printf(dev, "Can't find gpio controller for phy reset\n"); return (ENXIO); } if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1, gpio_prop + 1, &pin, &flags) != 0) { device_printf(dev, "Can't map gpio for phy reset\n"); return (ENXIO); } pin_value = GPIO_PIN_LOW; if (OF_hasprop(node, "snps,reset-active-low")) pin_value = GPIO_PIN_HIGH; GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[0]); GPIO_PIN_SET(gpio, pin, !pin_value); DELAY(delay_prop[1]); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[2]); return (0); } static int eqos_fdt_init(device_t dev) { struct eqos_softc *sc = device_get_softc(dev); phandle_t node = ofw_bus_get_node(dev); hwreset_t eqos_reset; regulator_t eqos_supply; uint32_t rx_delay, tx_delay; uint8_t buffer[16]; clk_t stmmaceth, mac_clk_rx, mac_clk_tx, aclk_mac, pclk_mac; uint64_t freq; int error; if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf)) { device_printf(dev, "cannot get grf driver handle\n"); return (ENXIO); } /* figure out if gmac0 or gmac1 offset */ switch (rman_get_start(sc->res[EQOS_RES_MEM])) { case RK356XGMAC0: /* RK356X gmac0 */ sc->grf_offset = EQOS_GRF_GMAC0; break; case RK356XGMAC1: /* RK356X 
gmac1 */ sc->grf_offset = EQOS_GRF_GMAC1; break; case RK3588GMAC0: /* RK3588 gmac0 */ case RK3588GMAC1: /* RK3588 gmac1 */ default: device_printf(dev, "Unknown eqos address\n"); return (ENXIO); } if (hwreset_get_by_ofw_idx(dev, node, 0, &eqos_reset)) { device_printf(dev, "cannot get reset\n"); return (ENXIO); } hwreset_assert(eqos_reset); error = clk_set_assigned(dev, ofw_bus_get_node(dev)); if (error != 0) { device_printf(dev, "clk_set_assigned failed\n"); return (error); } if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &stmmaceth) == 0) { error = clk_enable(stmmaceth); if (error != 0) { device_printf(dev, "could not enable main clock\n"); return (error); } if (bootverbose) { clk_get_freq(stmmaceth, &freq); device_printf(dev, "MAC clock(%s) freq: %jd\n", clk_get_name(stmmaceth), (intmax_t)freq); } } else { device_printf(dev, "could not find clock stmmaceth\n"); } if (clk_get_by_ofw_name(dev, 0, "mac_clk_rx", &mac_clk_rx) != 0) { device_printf(dev, "could not get mac_clk_rx clock\n"); mac_clk_rx = NULL; } if (clk_get_by_ofw_name(dev, 0, "mac_clk_tx", &mac_clk_tx) != 0) { device_printf(dev, "could not get mac_clk_tx clock\n"); mac_clk_tx = NULL; } if (clk_get_by_ofw_name(dev, 0, "aclk_mac", &aclk_mac) != 0) { device_printf(dev, "could not get aclk_mac clock\n"); aclk_mac = NULL; } if (clk_get_by_ofw_name(dev, 0, "pclk_mac", &pclk_mac) != 0) { device_printf(dev, "could not get pclk_mac clock\n"); pclk_mac = NULL; } if (aclk_mac) clk_enable(aclk_mac); if (pclk_mac) clk_enable(pclk_mac); if (mac_clk_tx) clk_enable(mac_clk_tx); sc->csr_clock = 125000000; sc->csr_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150; if (OF_getencprop(node, "tx_delay", &tx_delay, sizeof(tx_delay)) <= 0) tx_delay = 0x30; if (OF_getencprop(node, "rx_delay", &rx_delay, sizeof(rx_delay)) <= 0) rx_delay = 0x10; SYSCON_WRITE_4(sc->grf, sc->grf_offset + EQOS_CON0_OFFSET, EQOS_GMAC_CLK_RX_DL_CFG(rx_delay) | EQOS_GMAC_CLK_TX_DL_CFG(tx_delay)); SYSCON_WRITE_4(sc->grf, sc->grf_offset + EQOS_CON1_OFFSET, 
EQOS_GMAC_PHY_INTF_SEL_RGMII | EQOS_GMAC_RXCLK_DLY_ENABLE | EQOS_GMAC_TXCLK_DLY_ENABLE); if (!regulator_get_by_ofw_property(dev, 0, "phy-supply", &eqos_supply)) { if (regulator_enable(eqos_supply)) device_printf(dev, "cannot enable 'phy' regulator\n"); } else device_printf(dev, "no phy-supply property\n"); if (eqos_phy_reset(dev)) return (ENXIO); if (eqos_reset) hwreset_deassert(eqos_reset); /* set the MAC address if we have OTP data handy */ if (!RK_OTP_READ(dev, buffer, 0, sizeof(buffer))) { uint32_t mac; mac = hash32_buf(buffer, sizeof(buffer), HASHINIT); WR4(sc, GMAC_MAC_ADDRESS0_LOW, htobe32((mac & 0xffffff00) | 0x22)); mac = hash32_buf(buffer, sizeof(buffer), mac); WR4(sc, GMAC_MAC_ADDRESS0_HIGH, htobe16((mac & 0x0000ffff) + (device_get_unit(dev) << 8))); } return (0); } static int eqos_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "DesignWare EQOS Gigabit ethernet"); return (BUS_PROBE_DEFAULT); } static device_method_t eqos_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, eqos_fdt_probe), /* EQOS interface */ DEVMETHOD(if_eqos_init, eqos_fdt_init), DEVMETHOD_END }; DEFINE_CLASS_1(eqos, eqos_fdt_driver, eqos_fdt_methods, sizeof(struct eqos_softc), eqos_driver); DRIVER_MODULE(eqos, simplebus, eqos_fdt_driver, 0, 0); MODULE_DEPEND(eqos, ether, 1, 1, 1); MODULE_DEPEND(eqos, miibus, 1, 1, 1); diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw.c index 4e1f803a92fe..c017419be0ff 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw.c @@ -1,357 +1,357 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include /* * XXX these are here for now; move the code using these * into main.c once this is all done! */ #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" /* * Reset the ESS switch. This also resets the ESS ethernet * and PSGMII block. 
*/ int ar40xx_hw_ess_reset(struct ar40xx_softc *sc) { int ret; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_RESET, "%s: called\n", __func__); ret = hwreset_assert(sc->sc_ess_rst); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to assert reset\n"); return ret; } DELAY(10*1000); ret = hwreset_deassert(sc->sc_ess_rst); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to deassert reset\n"); return ret; } DELAY(10*1000); return (0); } int ar40xx_hw_init_globals(struct ar40xx_softc *sc) { uint32_t reg; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_INIT, "%s: called\n", __func__); /* enable CPU port and disable mirror port */ reg = AR40XX_FWD_CTRL0_CPU_PORT_EN | AR40XX_FWD_CTRL0_MIRROR_PORT; AR40XX_REG_WRITE(sc, AR40XX_REG_FWD_CTRL0, reg); /* forward multicast and broadcast frames to CPU */ reg = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) | (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) | (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S); AR40XX_REG_WRITE(sc, AR40XX_REG_FWD_CTRL1, reg); /* enable jumbo frames */ reg = AR40XX_REG_READ(sc, AR40XX_REG_MAX_FRAME_SIZE); reg &= ~AR40XX_MAX_FRAME_SIZE_MTU; reg |= 9018 + 8 + 2; AR40XX_REG_WRITE(sc, AR40XX_REG_MAX_FRAME_SIZE, reg); /* Enable MIB counters */ reg = AR40XX_REG_READ(sc, AR40XX_REG_MODULE_EN); reg |= AR40XX_MODULE_EN_MIB; AR40XX_REG_WRITE(sc, AR40XX_REG_MODULE_EN, reg); /* Disable AZ */ AR40XX_REG_WRITE(sc, AR40XX_REG_EEE_CTRL, 0); /* set flowctrl thershold for cpu port */ reg = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) | AR40XX_PORT0_FC_THRESH_OFF_DFLT; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } int ar40xx_hw_vlan_init(struct ar40xx_softc *sc) { int i; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_INIT, "%s: called\n", __func__); /* Enable VLANs by default */ sc->sc_vlan.vlan = 1; /* Configure initial LAN/WAN bitmap and include CPU port as tagged */ sc->sc_vlan.vlan_id[AR40XX_LAN_VLAN] = AR40XX_LAN_VLAN | ETHERSWITCH_VID_VALID; sc->sc_vlan.vlan_id[AR40XX_WAN_VLAN] = 
AR40XX_WAN_VLAN | ETHERSWITCH_VID_VALID; sc->sc_vlan.vlan_ports[AR40XX_LAN_VLAN] = sc->sc_config.switch_cpu_bmp | sc->sc_config.switch_lan_bmp; sc->sc_vlan.vlan_untagged[AR40XX_LAN_VLAN] = sc->sc_config.switch_lan_bmp; sc->sc_vlan.vlan_ports[AR40XX_WAN_VLAN] = sc->sc_config.switch_cpu_bmp | sc->sc_config.switch_wan_bmp; sc->sc_vlan.vlan_untagged[AR40XX_WAN_VLAN] = sc->sc_config.switch_wan_bmp; /* Populate the per-port PVID - pvid[] is an index into vlan_id[] */ for (i = 0; i < AR40XX_NUM_PORTS; i++) { if (sc->sc_config.switch_lan_bmp & (1U << i)) sc->sc_vlan.pvid[i] = AR40XX_LAN_VLAN; if (sc->sc_config.switch_wan_bmp & (1U << i)) sc->sc_vlan.pvid[i] = AR40XX_WAN_VLAN; } return (0); } /* * Apply the per-port and global configuration from software. * * This is useful if we ever start doing the linux switch framework * thing of updating the config in one hit and pushing it to the * hardware. For now it's just used in the reset path. */ int ar40xx_hw_sw_hw_apply(struct ar40xx_softc *sc) { uint8_t portmask[AR40XX_NUM_PORTS]; int i, j, ret; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_INIT, "%s: called\n", __func__); /* * Flush the VTU configuration. */ ret = ar40xx_hw_vtu_flush(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: couldn't apply config; vtu flush failed (%d)\n", ret); return (ret); } memset(portmask, 0, sizeof(portmask)); /* * Configure the ports based on whether it's 802.1q * VLANs, or just straight up per-port VLANs. 
*/ if (sc->sc_vlan.vlan) { device_printf(sc->sc_dev, "%s: configuring 802.1q VLANs\n", __func__); for (j = 0; j < AR40XX_NUM_VTU_ENTRIES; j++) { uint8_t vp = sc->sc_vlan.vlan_ports[j]; if (!vp) continue; if ((sc->sc_vlan.vlan_id[j] & ETHERSWITCH_VID_VALID) == 0) continue; for (i = 0; i < AR40XX_NUM_PORTS; i++) { uint8_t mask = (1U << i); if (vp & mask) portmask[i] |= vp & ~mask; } ar40xx_hw_vtu_load_vlan(sc, sc->sc_vlan.vlan_id[j] & ETHERSWITCH_VID_MASK, sc->sc_vlan.vlan_ports[j], sc->sc_vlan.vlan_untagged[j]); } } else { device_printf(sc->sc_dev, "%s: configuring per-port VLANs\n", __func__); for (i = 0; i < AR40XX_NUM_PORTS; i++) { if (i == AR40XX_PORT_CPU) continue; portmask[i] = (1U << AR40XX_PORT_CPU); portmask[AR40XX_PORT_CPU] |= (1U << i); } } /* * Update per-port destination mask, vlan tag settings */ for (i = 0; i < AR40XX_NUM_PORTS; i++) (void) ar40xx_hw_port_setup(sc, i, portmask[i]); /* Set the mirror register config */ ret = ar40xx_hw_mirror_set_registers(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: couldn't apply config; mirror config failed" " (%d)\n", ret); return (ret); } return (0); } int ar40xx_hw_wait_bit(struct ar40xx_softc *sc, int reg, uint32_t mask, uint32_t val) { int timeout = 20; uint32_t t; while (true) { AR40XX_REG_BARRIER_READ(sc); t = AR40XX_REG_READ(sc, reg); if ((t & mask) == val) return 0; if (timeout-- <= 0) break; DELAY(20); } device_printf(sc->sc_dev, "ERROR: timeout for reg " "%08x: %08x & %08x != %08x\n", (unsigned int)reg, t, mask, val); return (ETIMEDOUT); } /* * Read the switch MAC address. 
*/ int ar40xx_hw_read_switch_mac_address(struct ar40xx_softc *sc, struct ether_addr *ea) { uint32_t ret0, ret1; char *s; s = (void *) ea; AR40XX_LOCK_ASSERT(sc); AR40XX_REG_BARRIER_READ(sc); ret0 = AR40XX_REG_READ(sc, AR40XX_REG_SW_MAC_ADDR0); ret1 = AR40XX_REG_READ(sc, AR40XX_REG_SW_MAC_ADDR1); s[5] = MS(ret0, AR40XX_REG_SW_MAC_ADDR0_BYTE5); s[4] = MS(ret0, AR40XX_REG_SW_MAC_ADDR0_BYTE4); s[3] = MS(ret1, AR40XX_REG_SW_MAC_ADDR1_BYTE3); s[2] = MS(ret1, AR40XX_REG_SW_MAC_ADDR1_BYTE2); s[1] = MS(ret1, AR40XX_REG_SW_MAC_ADDR1_BYTE1); s[0] = MS(ret1, AR40XX_REG_SW_MAC_ADDR1_BYTE0); return (0); } /* * Set the switch MAC address. */ int ar40xx_hw_write_switch_mac_address(struct ar40xx_softc *sc, struct ether_addr *ea) { uint32_t ret0 = 0, ret1 = 0; char *s; s = (void *) ea; AR40XX_LOCK_ASSERT(sc); ret0 |= SM(s[5], AR40XX_REG_SW_MAC_ADDR0_BYTE5); ret0 |= SM(s[4], AR40XX_REG_SW_MAC_ADDR0_BYTE4); ret1 |= SM(s[3], AR40XX_REG_SW_MAC_ADDR1_BYTE3); ret1 |= SM(s[2], AR40XX_REG_SW_MAC_ADDR1_BYTE2); ret1 |= SM(s[1], AR40XX_REG_SW_MAC_ADDR1_BYTE1); ret1 |= SM(s[0], AR40XX_REG_SW_MAC_ADDR1_BYTE0); AR40XX_REG_WRITE(sc, AR40XX_REG_SW_MAC_ADDR0, ret0); AR40XX_REG_WRITE(sc, AR40XX_REG_SW_MAC_ADDR1, ret1); AR40XX_REG_BARRIER_WRITE(sc); return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_atu.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_atu.c index 4cbe65025111..a3facf4a6199 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_atu.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_atu.c @@ -1,216 +1,216 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" int ar40xx_hw_atu_wait_busy(struct ar40xx_softc *sc) { int ret; ret = ar40xx_hw_wait_bit(sc, AR40XX_REG_ATU_FUNC, AR40XX_ATU_FUNC_BUSY, 0); return (ret); } int ar40xx_hw_atu_flush_all(struct ar40xx_softc *sc) { int ret; AR40XX_LOCK_ASSERT(sc); AR40XX_DPRINTF(sc, AR40XX_DBG_ATU_OP, "%s: called\n", __func__); ret = ar40xx_hw_atu_wait_busy(sc); if (ret != 0) return (ret); AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_FUNC, AR40XX_ATU_FUNC_OP_FLUSH | AR40XX_ATU_FUNC_BUSY); AR40XX_REG_BARRIER_WRITE(sc); return (ret); } int ar40xx_hw_atu_flush_port(struct ar40xx_softc *sc, int port) { uint32_t val; int ret; AR40XX_LOCK_ASSERT(sc); 
AR40XX_DPRINTF(sc, AR40XX_DBG_ATU_OP, "%s: called, port=%d\n", __func__, port); if (port >= AR40XX_NUM_PORTS) { return (EINVAL); } ret = ar40xx_hw_atu_wait_busy(sc); if (ret != 0) return (ret); val = AR40XX_ATU_FUNC_OP_FLUSH_UNICAST; val |= (port << AR40XX_ATU_FUNC_PORT_NUM_S) & AR40XX_ATU_FUNC_PORT_NUM; AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_FUNC, val | AR40XX_ATU_FUNC_BUSY); AR40XX_REG_BARRIER_WRITE(sc); return (0); } int ar40xx_hw_atu_fetch_entry(struct ar40xx_softc *sc, etherswitch_atu_entry_t *e, int atu_fetch_op) { uint32_t ret0, ret1, ret2, val; int ret; AR40XX_LOCK_ASSERT(sc); switch (atu_fetch_op) { case 0: /* Initialise things for the first fetch */ AR40XX_DPRINTF(sc, AR40XX_DBG_ATU_OP, "%s: initializing\n", __func__); ret = ar40xx_hw_atu_wait_busy(sc); if (ret != 0) return (ret); AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_FUNC, AR40XX_ATU_FUNC_OP_GET_NEXT); AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_DATA0, 0); AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_DATA1, 0); AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_DATA2, 0); AR40XX_REG_BARRIER_WRITE(sc); return (0); case 1: AR40XX_DPRINTF(sc, AR40XX_DBG_ATU_OP, "%s: reading next\n", __func__); /* * Attempt to read the next address entry; don't modify what * is there in these registers as its used for the next fetch */ ret = ar40xx_hw_atu_wait_busy(sc); if (ret != 0) return (ret); /* Begin the next read event; not modifying anything */ AR40XX_REG_BARRIER_READ(sc); val = AR40XX_REG_READ(sc, AR40XX_REG_ATU_FUNC); val |= AR40XX_ATU_FUNC_BUSY; AR40XX_REG_WRITE(sc, AR40XX_REG_ATU_FUNC, val); AR40XX_REG_BARRIER_WRITE(sc); /* Wait for it to complete */ ret = ar40xx_hw_atu_wait_busy(sc); if (ret != 0) return (ret); /* Fetch the ethernet address and ATU status */ AR40XX_REG_BARRIER_READ(sc); ret0 = AR40XX_REG_READ(sc, AR40XX_REG_ATU_DATA0); ret1 = AR40XX_REG_READ(sc, AR40XX_REG_ATU_DATA1); ret2 = AR40XX_REG_READ(sc, AR40XX_REG_ATU_DATA2); /* If the status is zero, then we're done */ if (MS(ret2, AR40XX_ATU_FUNC_DATA2_STATUS) == 0) return (ENOENT); /* 
MAC address */ e->es_macaddr[5] = MS(ret0, AR40XX_ATU_DATA0_MAC_ADDR3); e->es_macaddr[4] = MS(ret0, AR40XX_ATU_DATA0_MAC_ADDR2); e->es_macaddr[3] = MS(ret0, AR40XX_ATU_DATA0_MAC_ADDR1); e->es_macaddr[2] = MS(ret0, AR40XX_ATU_DATA0_MAC_ADDR0); e->es_macaddr[0] = MS(ret1, AR40XX_ATU_DATA1_MAC_ADDR5); e->es_macaddr[1] = MS(ret1, AR40XX_ATU_DATA1_MAC_ADDR4); /* Bitmask of ports this entry is for */ e->es_portmask = MS(ret1, AR40XX_ATU_DATA1_DEST_PORT); /* TODO: other flags that are interesting */ AR40XX_DPRINTF(sc, AR40XX_DBG_ATU_OP, "%s: MAC %6D portmask 0x%08x\n", __func__, e->es_macaddr, ":", e->es_portmask); return (0); default: return (EINVAL); } return (EINVAL); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mdio.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mdio.c index 39296fc3d444..43c2d8744054 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mdio.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mdio.c @@ -1,129 +1,129 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" int ar40xx_hw_phy_dbg_write(struct ar40xx_softc *sc, int phy, uint16_t dbg, uint16_t data) { AR40XX_LOCK_ASSERT(sc); device_printf(sc->sc_dev, "%s: TODO\n", __func__); return (0); } int ar40xx_hw_phy_dbg_read(struct ar40xx_softc *sc, int phy, uint16_t dbg) { AR40XX_LOCK_ASSERT(sc); device_printf(sc->sc_dev, "%s: TODO\n", __func__); return (-1); } int ar40xx_hw_phy_mmd_write(struct ar40xx_softc *sc, uint32_t phy_id, uint16_t mmd_num, uint16_t reg_id, uint16_t reg_val) { AR40XX_LOCK_ASSERT(sc); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_ADDR, mmd_num); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_DATA, reg_id); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_ADDR, 0x4000 | mmd_num); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_DATA, reg_val); return (0); } int ar40xx_hw_phy_mmd_read(struct ar40xx_softc *sc, uint32_t phy_id, uint16_t mmd_num, uint16_t reg_id) { uint16_t value; AR40XX_LOCK_ASSERT(sc); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_ADDR, mmd_num); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, 
AR40XX_MII_ATH_MMD_DATA, reg_id); MDIO_WRITEREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_ADDR, 0x4000 | mmd_num); value = MDIO_READREG(sc->sc_mdio_dev, phy_id, AR40XX_MII_ATH_MMD_DATA); return value; } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mib.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mib.c index 55c0003766c5..6fdc9e96fd81 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mib.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mib.c @@ -1,194 +1,194 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" #define MIB_DESC(_s , _o, _n) \ { \ .size = (_s), \ .offset = (_o), \ .name = (_n), \ } static const struct ar40xx_mib_desc ar40xx_mibs[] = { MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"), MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"), MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"), MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"), MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"), MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"), MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"), MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"), MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"), MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"), MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"), MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"), MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"), MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"), MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"), MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"), MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"), MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"), MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"), MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"), MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"), MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"), MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"), MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"), MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"), MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"), MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"), MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"), MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"), MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, 
"TxMaxByte"), MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"), MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"), MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"), MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"), MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"), MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"), MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"), MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"), MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"), }; int ar40xx_hw_mib_op(struct ar40xx_softc *sc, uint32_t op) { uint32_t reg; int ret; AR40XX_LOCK_ASSERT(sc); /* Trigger capturing statistics on all ports */ AR40XX_REG_BARRIER_READ(sc); reg = AR40XX_REG_READ(sc, AR40XX_REG_MIB_FUNC); reg &= ~AR40XX_MIB_FUNC; reg |= (op << AR40XX_MIB_FUNC_S); AR40XX_REG_WRITE(sc, AR40XX_REG_MIB_FUNC, reg); AR40XX_REG_BARRIER_WRITE(sc); /* Now wait */ ret = ar40xx_hw_wait_bit(sc, AR40XX_REG_MIB_FUNC, AR40XX_MIB_BUSY, 0); if (ret != 0) { device_printf(sc->sc_dev, "%s: ERROR: timeout waiting for MIB load\n", __func__); } return ret; } int ar40xx_hw_mib_capture(struct ar40xx_softc *sc) { int ret; ret = ar40xx_hw_mib_op(sc, AR40XX_MIB_FUNC_CAPTURE); return (ret); } int ar40xx_hw_mib_flush(struct ar40xx_softc *sc) { int ret; ret = ar40xx_hw_mib_op(sc, AR40XX_MIB_FUNC_FLUSH); return (ret); } int ar40xx_hw_mib_fetch(struct ar40xx_softc *sc, int port) { uint64_t val; uint32_t base, reg; int i; base = AR40XX_REG_PORT_STATS_START + (AR40XX_REG_PORT_STATS_LEN * port); /* For now just print them out, we'll store them later */ AR40XX_REG_BARRIER_READ(sc); for (i = 0; i < nitems(ar40xx_mibs); i++) { val = 0; val = AR40XX_REG_READ(sc, base + ar40xx_mibs[i].offset); if (ar40xx_mibs[i].size == 2) { reg = AR40XX_REG_READ(sc, base + ar40xx_mibs[i].offset + 4); val |= ((uint64_t) reg << 32); } device_printf(sc->sc_dev, "%s[%d] = %llu\n", ar40xx_mibs[i].name, port, val); } return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mirror.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mirror.c 
index 011cf8607343..7e440f54e3ac 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_mirror.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_mirror.c @@ -1,132 +1,132 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" int ar40xx_hw_mirror_set_registers(struct ar40xx_softc *sc) { uint32_t reg; int port; /* Reset the mirror registers before configuring */ reg = AR40XX_REG_READ(sc, AR40XX_REG_FWD_CTRL0); reg &= ~(AR40XX_FWD_CTRL0_MIRROR_PORT); reg |= (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S); AR40XX_REG_WRITE(sc, AR40XX_REG_FWD_CTRL0, reg); AR40XX_REG_BARRIER_WRITE(sc); for (port = 0; port < AR40XX_NUM_PORTS; port++) { reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(port)); reg &= ~AR40XX_PORT_LOOKUP_ING_MIRROR_EN; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(port), reg); reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_HOL_CTRL1(port)); reg &= ~AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_HOL_CTRL1(port), reg); AR40XX_REG_BARRIER_WRITE(sc); } /* Now, enable mirroring if requested */ if (sc->sc_monitor.source_port >= AR40XX_NUM_PORTS || sc->sc_monitor.monitor_port >= AR40XX_NUM_PORTS || sc->sc_monitor.source_port == sc->sc_monitor.monitor_port) { return (0); } reg = AR40XX_REG_READ(sc, AR40XX_REG_FWD_CTRL0); reg &= ~AR40XX_FWD_CTRL0_MIRROR_PORT; reg |= (sc->sc_monitor.monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S); AR40XX_REG_WRITE(sc, AR40XX_REG_FWD_CTRL0, reg); if (sc->sc_monitor.mirror_rx) { reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(sc->sc_monitor.source_port)); reg |= AR40XX_PORT_LOOKUP_ING_MIRROR_EN; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(sc->sc_monitor.source_port), reg); AR40XX_REG_BARRIER_WRITE(sc); } if (sc->sc_monitor.mirror_tx) { reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_HOL_CTRL1(sc->sc_monitor.source_port)); reg |= AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN; 
AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_HOL_CTRL1(sc->sc_monitor.source_port), reg); AR40XX_REG_BARRIER_WRITE(sc); } return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_port.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_port.c index e207a6da8b8f..e701f3ae28ba 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_port.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_port.c @@ -1,287 +1,287 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" int ar40xx_hw_port_init(struct ar40xx_softc *sc, int port) { uint32_t reg; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_PORT_INIT, "%s: called; port %d\n", __func__, port); AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(port), 0); AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_HEADER(port), 0); AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_VLAN0(port), 0); AR40XX_REG_BARRIER_WRITE(sc); DELAY(20); /* * Ok! Here is where things get super fun in the AR40xx * driver in uboot/linux. * * The earlier chipset switch drivers enable auto link enable here. * The switch will poll the PHYs too, and configure appropriately. * * The ar40xx code in linux/u-boot instead has a whole workaround * path that polls things directly and does some weird hijinx. * NOTABLY - they do NOT enable the TX/RX MAC here or autoneg - * it's done in the work around path. * * SO - for now the port is left off until the PHY state changes. * And then we flip it on and off based on the PHY state. */ #if 0 AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(port), AR40XX_PORT_AUTO_LINK_EN); #endif /* * Configure the VLAN egress mode (don't touch them) and * learning state for STP/ATU. This isn't currently * configurable so it's just nailed up here and left alone. */ reg = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_VLAN1(port), reg); reg = AR40XX_PORT_LOOKUP_LEARN; reg |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(port), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Call when the link for a non-CPU port is down. 
* * This will turn off the MAC/forwarding path for this port. */ int ar40xx_hw_port_link_down(struct ar40xx_softc *sc, int port) { AR40XX_DPRINTF(sc, AR40XX_DBG_HW_PORT_INIT, "%s: called; port %d\n", __func__, port); AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(port), 0); return (0); } /* * Call when the link for a non-CPU port is up. * * This will turn on the default auto-link checking and * eventually enable the TX/RX MAC. */ int ar40xx_hw_port_link_up(struct ar40xx_softc *sc, int port) { uint32_t reg; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_PORT_INIT, "%s: called; port %d\n", __func__, port); /* Auto-link enable */ AR40XX_REG_BARRIER_READ(sc); reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_STATUS(port)); reg |= AR40XX_PORT_AUTO_LINK_EN; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(port), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Setup the CPU facing port. For this device it'll only * be port 0. */ int ar40xx_hw_port_cpuport_setup(struct ar40xx_softc *sc) { uint32_t reg; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_PORT_INIT, "%s: called\n", __func__); reg = AR40XX_PORT_STATUS_TXFLOW | AR40XX_PORT_STATUS_RXFLOW | AR40XX_PORT_TXHALF_FLOW | AR40XX_PORT_DUPLEX | AR40XX_PORT_SPEED_1000M; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(0), reg); DELAY(20); reg |= AR40XX_PORT_TX_EN | AR40XX_PORT_RX_EN; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_STATUS(0), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Fetch the port PVID. * * For 802.1q mode this is the default VLAN ID for the port. * Frames without an 802.1q VLAN will assume this VLAN ID for * transmit/receive. */ int ar40xx_hw_get_port_pvid(struct ar40xx_softc *sc, int port, int *pvid) { uint32_t reg; AR40XX_LOCK_ASSERT(sc); AR40XX_REG_BARRIER_READ(sc); reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_VLAN0(port)); reg = reg >> AR40XX_PORT_VLAN0_DEF_CVID_S; reg = reg & 0x0fff; /* XXX */ *pvid = reg; return (0); } /* * Set the port PVID. * * For now, since double-tagged frames aren't currently supported, * CVID=SVID here. 
*/ int ar40xx_hw_set_port_pvid(struct ar40xx_softc *sc, int port, int pvid) { uint32_t reg; AR40XX_LOCK_ASSERT(sc); pvid &= ETHERSWITCH_VID_MASK; reg = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S; reg |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_VLAN0(port), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Setup the default port membership configuration. * * This configures the PVID for the port in the sc_vlan config, * along with a set of ports that constitute the "membership" * of this particular VID. * * For 802.1q mode the membership can be viewed as the default * learning port group, but this can be added to via VLAN membership. * (Eg you could in theory split two LAN ports into separate "member" * groups and they'd not learn MAC addresses from each other even * inside a VLAN; you'd then end up with the traffic being flooded to * the CPU port.) */ int ar40xx_hw_port_setup(struct ar40xx_softc *sc, int port, uint32_t members) { uint32_t egress, ingress, reg; uint32_t pvid = sc->sc_vlan.vlan_id[sc->sc_vlan.pvid[port]] & ETHERSWITCH_VID_MASK; if (sc->sc_vlan.vlan) { egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD; ingress = AR40XX_IN_SECURE; } else { egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH; ingress = AR40XX_IN_PORT_ONLY; } reg = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S; reg |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_VLAN0(port), reg); AR40XX_REG_BARRIER_WRITE(sc); reg = AR40XX_PORT_VLAN1_PORT_VLAN_PROP; reg |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_VLAN1(port), reg); AR40XX_REG_BARRIER_WRITE(sc); reg = members; reg |= AR40XX_PORT_LOOKUP_LEARN; reg |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S; reg |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(port), reg); AR40XX_REG_BARRIER_WRITE(sc); return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_psgmii.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_psgmii.c index 
24460d8b0b43..0f0704c41aba 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_psgmii.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_psgmii.c @@ -1,437 +1,437 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" /* * Routines that control the ess-psgmii block - the interconnect * between the ess-switch and the external multi-port PHY * (eg Maple.) */ static void ar40xx_hw_psgmii_reg_write(struct ar40xx_softc *sc, uint32_t reg, uint32_t val) { bus_space_write_4(sc->sc_psgmii_mem_tag, sc->sc_psgmii_mem_handle, reg, val); bus_space_barrier(sc->sc_psgmii_mem_tag, sc->sc_psgmii_mem_handle, 0, sc->sc_psgmii_mem_size, BUS_SPACE_BARRIER_WRITE); } static int ar40xx_hw_psgmii_reg_read(struct ar40xx_softc *sc, uint32_t reg) { int ret; bus_space_barrier(sc->sc_psgmii_mem_tag, sc->sc_psgmii_mem_handle, 0, sc->sc_psgmii_mem_size, BUS_SPACE_BARRIER_READ); ret = bus_space_read_4(sc->sc_psgmii_mem_tag, sc->sc_psgmii_mem_handle, reg); return (ret); } int ar40xx_hw_psgmii_set_mac_mode(struct ar40xx_softc *sc, uint32_t mac_mode) { if (mac_mode == PORT_WRAPPER_PSGMII) { ar40xx_hw_psgmii_reg_write(sc, AR40XX_PSGMII_MODE_CONTROL, 0x2200); ar40xx_hw_psgmii_reg_write(sc, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380); } else { device_printf(sc->sc_dev, "WARNING: unknown MAC_MODE=%u\n", mac_mode); } return (0); } int ar40xx_hw_psgmii_single_phy_testing(struct ar40xx_softc *sc, int phy) { int j; uint32_t tx_ok, tx_error; uint32_t rx_ok, rx_error; uint32_t tx_ok_high16; uint32_t rx_ok_high16; uint32_t tx_all_ok, rx_all_ok; MDIO_WRITEREG(sc->sc_mdio_dev, phy, 0x0, 0x9000); MDIO_WRITEREG(sc->sc_mdio_dev, phy, 0x0, 0x4140); for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { uint16_t status; status = MDIO_READREG(sc->sc_mdio_dev, phy, 0x11); if (status & AR40XX_PHY_SPEC_STATUS_LINK) break; /* * the polling 
interval to check if the PHY link up * or not * maxwait_timer: 750 ms +/-10 ms * minwait_timer : 1 us +/- 0.1us * time resides in minwait_timer ~ maxwait_timer * see IEEE 802.3 section 40.4.5.2 */ DELAY(8 * 1000); } /* enable check */ ar40xx_hw_phy_mmd_write(sc, phy, 7, 0x8029, 0x0000); ar40xx_hw_phy_mmd_write(sc, phy, 7, 0x8029, 0x0003); /* start traffic */ ar40xx_hw_phy_mmd_write(sc, phy, 7, 0x8020, 0xa000); /* *wait for all traffic end * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms */ DELAY(60 * 1000); /* check counter */ tx_ok = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802e); tx_ok_high16 = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802d); tx_error = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802f); rx_ok = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802b); rx_ok_high16 = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802a); rx_error = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802c); tx_all_ok = tx_ok + (tx_ok_high16 << 16); rx_all_ok = rx_ok + (rx_ok_high16 << 16); if (tx_all_ok == 0x1000 && tx_error == 0) { /* success */ sc->sc_psgmii.phy_t_status &= ~(1U << phy); } else { device_printf(sc->sc_dev, "TX_OK=%d, tx_error=%d RX_OK=%d" " rx_error=%d\n", tx_all_ok, tx_error, rx_all_ok, rx_error); device_printf(sc->sc_dev, "PHY %d single test PSGMII issue happen!\n", phy); sc->sc_psgmii.phy_t_status |= BIT(phy); } MDIO_WRITEREG(sc->sc_mdio_dev, phy, 0x0, 0x1840); return (0); } int ar40xx_hw_psgmii_all_phy_testing(struct ar40xx_softc *sc) { int phy, j; MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x0, 0x9000); MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x0, 0x4140); for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { uint16_t status; status = MDIO_READREG(sc->sc_mdio_dev, phy, 0x11); if (!(status & (1U << 10))) break; } if (phy >= (AR40XX_NUM_PORTS - 1)) break; /* The polling interval to check if the PHY link up or not */ DELAY(8*1000); } /* enable check */ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8029, 0x0000); ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8029, 0x0003); /* start 
traffic */ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8020, 0xa000); /* * wait for all traffic end * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms */ DELAY(60*1000); /* was 50ms */ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { uint32_t tx_ok, tx_error; uint32_t rx_ok, rx_error; uint32_t tx_ok_high16; uint32_t rx_ok_high16; uint32_t tx_all_ok, rx_all_ok; /* check counter */ tx_ok = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802e); tx_ok_high16 = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802d); tx_error = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802f); rx_ok = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802b); rx_ok_high16 = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802a); rx_error = ar40xx_hw_phy_mmd_read(sc, phy, 7, 0x802c); tx_all_ok = tx_ok + (tx_ok_high16<<16); rx_all_ok = rx_ok + (rx_ok_high16<<16); if (tx_all_ok == 0x1000 && tx_error == 0) { /* success */ sc->sc_psgmii.phy_t_status &= ~(1U << (phy + 8)); } else { device_printf(sc->sc_dev, "PHY%d test see issue! (tx_all_ok=%u," " rx_all_ok=%u, tx_error=%u, rx_error=%u)\n", phy, tx_all_ok, rx_all_ok, tx_error, rx_error); sc->sc_psgmii.phy_t_status |= (1U << (phy + 8)); } } device_printf(sc->sc_dev, "PHY all test 0x%x\n", sc->sc_psgmii.phy_t_status); return (0); } /* * Reset PSGMII in the Malibu PHY. */ int ar40xx_hw_malibu_psgmii_ess_reset(struct ar40xx_softc *sc) { device_printf(sc->sc_dev, "%s: called\n", __func__); uint32_t i; /* reset phy psgmii */ /* fix phy psgmii RX 20bit */ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x0, 0x005b); /* reset phy psgmii */ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x0, 0x001b); /* release reset phy psgmii */ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x0, 0x005b); for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) { uint32_t status; status = ar40xx_hw_phy_mmd_read(sc, 5, 1, 0x28); if (status & (1U << 0)) break; /* * Polling interval to check PSGMII PLL in malibu is ready * the worst time is 8.67ms * for 25MHz reference clock * [512+(128+2048)*49]*80ns+100us */ DELAY(2000); } /* XXX TODO ;see if it timed out? 
*/ /*check malibu psgmii calibration done end..*/ /*freeze phy psgmii RX CDR*/ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x1a, 0x2230); ar40xx_hw_ess_reset(sc); /*check psgmii calibration done start*/ for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) { uint32_t status; status = ar40xx_hw_psgmii_reg_read(sc, 0xa0); if (status & (1U << 0)) break; /* Polling interval to check PSGMII PLL in ESS is ready */ DELAY(2000); } /* XXX TODO ;see if it timed out? */ /* check dakota psgmii calibration done end..*/ /* release phy psgmii RX CDR */ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x1a, 0x3230); /* release phy psgmii RX 20bit */ MDIO_WRITEREG(sc->sc_mdio_dev, 5, 0x0, 0x005f); return (0); } int ar40xx_hw_psgmii_self_test(struct ar40xx_softc *sc) { uint32_t i, phy, reg; device_printf(sc->sc_dev, "%s: called\n", __func__); ar40xx_hw_malibu_psgmii_ess_reset(sc); /* switch to access MII reg for copper */ MDIO_WRITEREG(sc->sc_mdio_dev, 4, 0x1f, 0x8500); for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { /*enable phy mdio broadcast write*/ ar40xx_hw_phy_mmd_write(sc, phy, 7, 0x8028, 0x801f); } /* force no link by power down */ MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x0, 0x1840); /* packet number*/ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8021, 0x1000); ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8062, 0x05e0); /* fix mdi status */ MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x10, 0x6800); for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) { sc->sc_psgmii.phy_t_status = 0; for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { /* Enable port loopback for testing */ AR40XX_REG_BARRIER_READ(sc); reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(phy + 1)); reg |= AR40XX_PORT_LOOKUP_LOOPBACK; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(phy + 1), reg); AR40XX_REG_BARRIER_WRITE(sc); } for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) ar40xx_hw_psgmii_single_phy_testing(sc, phy); ar40xx_hw_psgmii_all_phy_testing(sc); if (sc->sc_psgmii.phy_t_status) ar40xx_hw_malibu_psgmii_ess_reset(sc); else break; } if (i >= AR40XX_PSGMII_CALB_NUM) 
device_printf(sc->sc_dev, "PSGMII cannot recover\n"); else device_printf(sc->sc_dev, "PSGMII recovered after %d times reset\n", i); /* configuration recover */ /* packet number */ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8021, 0x0); /* disable check */ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8029, 0x0); /* disable traffic */ ar40xx_hw_phy_mmd_write(sc, 0x1f, 7, 0x8020, 0x0); return (0); } int ar40xx_hw_psgmii_self_test_clean(struct ar40xx_softc *sc) { uint32_t reg; int phy; device_printf(sc->sc_dev, "%s: called\n", __func__); /* disable phy internal loopback */ MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x10, 0x6860); MDIO_WRITEREG(sc->sc_mdio_dev, 0x1f, 0x0, 0x9040); for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { /* disable mac loop back */ reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(phy + 1)); reg &= ~AR40XX_PORT_LOOKUP_LOOPBACK; AR40XX_REG_WRITE(sc, AR40XX_REG_PORT_LOOKUP(phy + 1), reg); AR40XX_REG_BARRIER_WRITE(sc); /* disable phy mdio broadcast write */ ar40xx_hw_phy_mmd_write(sc, phy, 7, 0x8028, 0x001f); } /* clear fdb entry */ ar40xx_hw_atu_flush_all(sc); return (0); } int ar40xx_hw_psgmii_init_config(struct ar40xx_softc *sc) { uint32_t reg; /* * This is based on what I found in uboot - it configures * the initial ESS interconnect to either be PSGMII * or RGMII. */ /* For now, just assume PSGMII and fix it in post. 
*/ /* PSGMIIPHY_PLL_VCO_RELATED_CTRL */ reg = ar40xx_hw_psgmii_reg_read(sc, 0x78c); device_printf(sc->sc_dev, "%s: PSGMIIPHY_PLL_VCO_RELATED_CTRL=0x%08x\n", __func__, reg); /* PSGMIIPHY_VCO_CALIBRATION_CTRL */ reg = ar40xx_hw_psgmii_reg_read(sc, 0x09c); device_printf(sc->sc_dev, "%s: PSGMIIPHY_VCO_CALIBRATION_CTRL=0x%08x\n", __func__, reg); return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_hw_vtu.c b/sys/dev/etherswitch/ar40xx/ar40xx_hw_vtu.c index e471dd4746dc..3c3800847438 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_hw_vtu.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_hw_vtu.c @@ -1,196 +1,196 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" /* * Perform a VTU (vlan table unit) operation. */ int ar40xx_hw_vtu_op(struct ar40xx_softc *sc, uint32_t op, uint32_t val) { int ret; AR40XX_DPRINTF(sc, AR40XX_DBG_VTU_OP, "%s: called; op=0x%08x, val=0x%08x\n", __func__, op, val); ret = (ar40xx_hw_wait_bit(sc, AR40XX_REG_VTU_FUNC1, AR40XX_VTU_FUNC1_BUSY, 0)); if (ret != 0) return (ret); if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD) { AR40XX_REG_WRITE(sc, AR40XX_REG_VTU_FUNC0, val); AR40XX_REG_BARRIER_WRITE(sc); } op |= AR40XX_VTU_FUNC1_BUSY; AR40XX_REG_WRITE(sc, AR40XX_REG_VTU_FUNC1, op); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Load in a VLAN table map / port configuration for the given * vlan ID. 
*/ int ar40xx_hw_vtu_load_vlan(struct ar40xx_softc *sc, uint32_t vid, uint32_t port_mask, uint32_t untagged_mask) { uint32_t op, val, mode; int i, ret; AR40XX_DPRINTF(sc, AR40XX_DBG_VTU_OP, "%s: called; vid=%d port_mask=0x%08x, untagged_mask=0x%08x\n", __func__, vid, port_mask, untagged_mask); op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S); val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL; for (i = 0; i < AR40XX_NUM_PORTS; i++) { if ((port_mask & (1U << i)) == 0) /* Not in the VLAN at all */ mode = AR40XX_VTU_FUNC0_EG_MODE_NOT; else if (sc->sc_vlan.vlan == 0) /* VLAN mode disabled; keep the provided VLAN tag */ mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP; else if (untagged_mask & (1U << i)) /* Port in the VLAN; is untagged */ mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG; else /* Port is in the VLAN; is tagged */ mode = AR40XX_VTU_FUNC0_EG_MODE_TAG; val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i); } ret = ar40xx_hw_vtu_op(sc, op, val); return (ret); } /* * Flush all VLAN port entries. */ int ar40xx_hw_vtu_flush(struct ar40xx_softc *sc) { int ret; AR40XX_DPRINTF(sc, AR40XX_DBG_VTU_OP, "%s: called\n", __func__); ret = ar40xx_hw_vtu_op(sc, AR40XX_VTU_FUNC1_OP_FLUSH, 0); return (ret); } /* * Get the VLAN port map for the given vlan ID. */ int ar40xx_hw_vtu_get_vlan(struct ar40xx_softc *sc, int vid, uint32_t *ports, uint32_t *untagged_ports) { uint32_t op, reg, val; int i, r; op = AR40XX_VTU_FUNC1_OP_GET_ONE; /* Filter out any etherswitch VID flags; only grab the VLAN ID */ vid &= ETHERSWITCH_VID_MASK; /* XXX TODO: the VTU here stores egress mode - keep, tag, untagged, none */ op |= (vid << AR40XX_VTU_FUNC1_VID_S); r = ar40xx_hw_vtu_op(sc, op, 0); if (r != 0) { device_printf(sc->sc_dev, "%s: %d: op failed\n", __func__, vid); return (r); } AR40XX_REG_BARRIER_READ(sc); reg = AR40XX_REG_READ(sc, AR40XX_REG_VTU_FUNC0); *ports = 0; for (i = 0; i < AR40XX_NUM_PORTS; i++) { val = reg >> AR40XX_VTU_FUNC0_EG_MODE_S(i); val = val & 0x3; /* XXX KEEP (unmodified? 
For non-dot1q operation?) */ if (val == AR40XX_VTU_FUNC0_EG_MODE_TAG) { *ports |= (1 << i); } else if (val == AR40XX_VTU_FUNC0_EG_MODE_UNTAG) { *ports |= (1 << i); *untagged_ports |= (1 << i); } } return (0); } diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_main.c b/sys/dev/etherswitch/ar40xx/ar40xx_main.c index ed17d2789bf4..d8cbfd836a9b 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_main.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_main.c @@ -1,967 +1,967 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" static struct ofw_compat_data compat_data[] = { { "qcom,ess-switch", 1 }, { NULL, 0 }, }; static int ar40xx_probe(device_t dev) { if (! ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "IPQ4018 ESS Switch fabric / PSGMII PHY"); return (BUS_PROBE_DEFAULT); } static void ar40xx_tick(void *arg) { struct ar40xx_softc *sc = arg; (void) ar40xx_phy_tick(sc); callout_reset(&sc->sc_phy_callout, hz, ar40xx_tick, sc); } static void ar40xx_statchg(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s\n", __func__); } static int ar40xx_readphy(device_t dev, int phy, int reg) { struct ar40xx_softc *sc = device_get_softc(dev); return MDIO_READREG(sc->sc_mdio_dev, phy, reg); } static int ar40xx_writephy(device_t dev, int phy, int reg, int val) { struct ar40xx_softc *sc = device_get_softc(dev); return MDIO_WRITEREG(sc->sc_mdio_dev, phy, reg, val); } /* * Do the initial switch configuration. 
*/ static int ar40xx_reset_switch(struct ar40xx_softc *sc) { int ret, i; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_INIT, "%s: called\n", __func__); /* blank the VLAN config */ memset(&sc->sc_vlan, 0, sizeof(sc->sc_vlan)); /* initial vlan port mapping */ for (i = 0; i < AR40XX_NUM_VTU_ENTRIES; i++) sc->sc_vlan.vlan_id[i] = 0; /* init vlan config */ ret = ar40xx_hw_vlan_init(sc); /* init monitor config */ sc->sc_monitor.mirror_tx = false; sc->sc_monitor.mirror_rx = false; sc->sc_monitor.source_port = 0; sc->sc_monitor.monitor_port = 0; /* apply switch config */ ret = ar40xx_hw_sw_hw_apply(sc); return (ret); } static int ar40xx_sysctl_dump_port_state(SYSCTL_HANDLER_ARGS) { struct ar40xx_softc *sc = arg1; int val = 0; int error; int i; (void) i; (void) sc; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); if (val < 0 || val > 5) { return (EINVAL); } AR40XX_LOCK(sc); device_printf(sc->sc_dev, "port %d: PORT_STATUS=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_STATUS(val))); device_printf(sc->sc_dev, "port %d: PORT_HEADER=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_HEADER(val))); device_printf(sc->sc_dev, "port %d: PORT_VLAN0=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_VLAN0(val))); device_printf(sc->sc_dev, "port %d: PORT_VLAN1=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_VLAN1(val))); device_printf(sc->sc_dev, "port %d: PORT_LOOKUP=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(val))); device_printf(sc->sc_dev, "port %d: PORT_HOL_CTRL1=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_HOL_CTRL1(val))); device_printf(sc->sc_dev, "port %d: PORT_FLOWCTRL_THRESH=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_FLOWCTRL_THRESH(val))); AR40XX_UNLOCK(sc); return (0); } static int ar40xx_sysctl_dump_port_mibstats(SYSCTL_HANDLER_ARGS) { struct ar40xx_softc *sc = arg1; int val = 0; int error; int i; (void) i; (void) sc; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return 
(error); if (val < 0 || val > 5) { return (EINVAL); } AR40XX_LOCK(sc); /* Yes, this snapshots all ports */ (void) ar40xx_hw_mib_capture(sc); (void) ar40xx_hw_mib_fetch(sc, val); AR40XX_UNLOCK(sc); return (0); } static int ar40xx_sysctl_attach(struct ar40xx_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debugging flags"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_state", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ar40xx_sysctl_dump_port_state, "I", ""); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_mibstats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ar40xx_sysctl_dump_port_mibstats, "I", ""); return (0); } static int ar40xx_detach(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); int i; device_printf(sc->sc_dev, "%s: called\n", __func__); callout_drain(&sc->sc_phy_callout); /* Free PHYs */ for (i = 0; i < AR40XX_NUM_PHYS; i++) { if (sc->sc_phys.miibus[i] != NULL) device_delete_child(dev, sc->sc_phys.miibus[i]); if (sc->sc_phys.ifp[i] != NULL) if_free(sc->sc_phys.ifp[i]); free(sc->sc_phys.ifname[i], M_DEVBUF); } bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } static int ar40xx_attach(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); phandle_t psgmii_p, root_p, mdio_p; int ret, i; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ar40xx_switch", NULL, MTX_DEF); psgmii_p = OF_finddevice("/soc/ess-psgmii"); if (psgmii_p == -1) { device_printf(dev, "%s: couldn't find /soc/ess-psgmii DT node\n", __func__); goto error; } /* * Get the ipq4019-mdio node here, to talk to our local PHYs * if needed */ root_p = OF_finddevice("/soc"); mdio_p = ofw_bus_find_compatible(root_p, "qcom,ipq4019-mdio"); if (mdio_p == -1) { device_printf(dev, "%s: couldn't find ipq4019-mdio DT node\n", __func__); goto error; } sc->sc_mdio_phandle = mdio_p; 
sc->sc_mdio_dev = OF_device_from_xref(OF_xref_from_node(mdio_p)); if (sc->sc_mdio_dev == NULL) { device_printf(dev, "%s: couldn't get mdio device (mdio_p=%u)\n", __func__, mdio_p); goto error; } /* get psgmii base address from psgmii node */ ret = OF_decode_addr(psgmii_p, 0, &sc->sc_psgmii_mem_tag, &sc->sc_psgmii_mem_handle, &sc->sc_psgmii_mem_size); if (ret != 0) { device_printf(dev, "%s: couldn't map psgmii mem (%d)\n", __func__, ret); goto error; } /* get switch base address */ sc->sc_ess_mem_rid = 0; sc->sc_ess_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_ess_mem_rid, RF_ACTIVE); if (sc->sc_ess_mem_res == NULL) { device_printf(dev, "%s: failed to find memory resource\n", __func__); goto error; } sc->sc_ess_mem_size = (size_t) bus_get_resource_count(dev, SYS_RES_MEMORY, sc->sc_ess_mem_rid); if (sc->sc_ess_mem_size == 0) { device_printf(dev, "%s: failed to get device memory size\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_mac_mode", &sc->sc_config.switch_mac_mode, sizeof(sc->sc_config.switch_mac_mode)); if (ret < 0) { device_printf(dev, "%s: missing switch_mac_mode property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_cpu_bmp", &sc->sc_config.switch_cpu_bmp, sizeof(sc->sc_config.switch_cpu_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_cpu_bmp property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_lan_bmp", &sc->sc_config.switch_lan_bmp, sizeof(sc->sc_config.switch_lan_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_lan_bmp property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_wan_bmp", &sc->sc_config.switch_wan_bmp, sizeof(sc->sc_config.switch_wan_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_wan_bmp property\n", __func__); goto error; } ret = clk_get_by_ofw_name(dev, 0, "ess_clk", &sc->sc_ess_clk); if (ret != 0) { device_printf(dev, "%s: failed to find ess_clk 
(%d)\n", __func__, ret); goto error; } ret = clk_enable(sc->sc_ess_clk); if (ret != 0) { device_printf(dev, "%s: failed to enable clock (%d)\n", __func__, ret); goto error; } ret = hwreset_get_by_ofw_name(dev, 0, "ess_rst", &sc->sc_ess_rst); if (ret != 0) { device_printf(dev, "%s: failed to find ess_rst (%d)\n", __func__, ret); goto error; } /* * Ok, at this point we have enough resources to do an initial * reset and configuration. */ AR40XX_LOCK(sc); /* Initial PSGMII/RGMII port configuration */ ret = ar40xx_hw_psgmii_init_config(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to init PSGMII (%d)\n", ret); goto error_locked; } /* * ESS reset - this resets both the ethernet switch * AND the ethernet block. */ ret = ar40xx_hw_ess_reset(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to reset ESS block (%d)\n", ret); goto error_locked; } /* * Check the PHY IDs for each of the PHYs from 0..4; * this is useful to make sure that we can SEE the external * PHY(s). */ if (bootverbose) { ret = ar40xx_hw_phy_get_ids(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to check PHY IDs (%d)\n", ret); goto error_locked; } } /* * Do PSGMII PHY self-test; work-around issues. 
*/ ret = ar40xx_hw_psgmii_self_test(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to do PSGMII self-test (%d)\n", ret); goto error_locked; } /* Return port config to runtime state */ ret = ar40xx_hw_psgmii_self_test_clean(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to do PSGMII runtime config (%d)\n", ret); goto error_locked; } /* mac_mode_init */ ret = ar40xx_hw_psgmii_set_mac_mode(sc, sc->sc_config.switch_mac_mode); /* Initialise each hardware port */ for (i = 0; i < AR40XX_NUM_PORTS; i++) { ret = ar40xx_hw_port_init(sc, i); } /* initialise the global switch configuration */ ret = ar40xx_hw_init_globals(sc); /* reset the switch vlan/port learning config */ ret = ar40xx_reset_switch(sc); /* cpuport setup */ ret = ar40xx_hw_port_cpuport_setup(sc); AR40XX_UNLOCK(sc); #if 0 /* We may end up needing the QM workaround code here.. */ device_printf(dev, "%s: TODO: QM error check\n", __func__); #endif /* Attach PHYs */ ret = ar40xx_attach_phys(sc); ret = bus_generic_probe(dev); bus_enumerate_hinted_children(dev); ret = bus_generic_attach(dev); /* Start timer */ callout_init_mtx(&sc->sc_phy_callout, &sc->sc_mtx, 0); /* * Setup the etherswitch info block. */ strlcpy(sc->sc_info.es_name, device_get_desc(dev), sizeof(sc->sc_info.es_name)); sc->sc_info.es_nports = AR40XX_NUM_PORTS; sc->sc_info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; /* XXX TODO: double-tag / 802.1ad */ sc->sc_info.es_nvlangroups = AR40XX_NUM_VTU_ENTRIES; /* * Fetch the initial port configuration. 
*/ AR40XX_LOCK(sc); ar40xx_tick(sc); AR40XX_UNLOCK(sc); ar40xx_sysctl_attach(sc); return (0); error_locked: AR40XX_UNLOCK(sc); error: ar40xx_detach(dev); return (ENXIO); } static void ar40xx_lock(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_LOCK(sc); } static void ar40xx_unlock(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_LOCK_ASSERT(sc); AR40XX_UNLOCK(sc); } static etherswitch_info_t * ar40xx_getinfo(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); return (&sc->sc_info); } static int ar40xx_readreg(device_t dev, int addr) { struct ar40xx_softc *sc = device_get_softc(dev); if (addr >= sc->sc_ess_mem_size - 1) return (-1); AR40XX_REG_BARRIER_READ(sc); return AR40XX_REG_READ(sc, addr); } static int ar40xx_writereg(device_t dev, int addr, int value) { struct ar40xx_softc *sc = device_get_softc(dev); if (addr >= sc->sc_ess_mem_size - 1) return (-1); AR40XX_REG_WRITE(sc, addr, value); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Get the port configuration and status. */ static int ar40xx_getport(device_t dev, etherswitch_port_t *p) { struct ar40xx_softc *sc = device_get_softc(dev); struct mii_data *mii = NULL; struct ifmediareq *ifmr; int err; if (p->es_port < 0 || p->es_port > sc->sc_info.es_nports) return (ENXIO); AR40XX_LOCK(sc); /* Fetch the current VLAN configuration for this port */ /* PVID */ ar40xx_hw_get_port_pvid(sc, p->es_port, &p->es_pvid); /* * The VLAN egress aren't appropriate to the ports; * instead it's part of the VLAN group config. 
*/ /* Get MII config */ mii = ar40xx_phy_miiforport(sc, p->es_port); AR40XX_UNLOCK(sc); if (p->es_port == 0) { /* CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { /* non-CPU port */ err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } /* * Set the port configuration and status. */ static int ar40xx_setport(device_t dev, etherswitch_port_t *p) { struct ar40xx_softc *sc = device_get_softc(dev); struct ifmedia *ifm; struct mii_data *mii; if_t ifp; int ret; if (p->es_port < 0 || p->es_port > sc->sc_info.es_nports) return (EINVAL); /* Port flags */ AR40XX_LOCK(sc); ret = ar40xx_hw_set_port_pvid(sc, p->es_port, p->es_pvid); if (ret != 0) { AR40XX_UNLOCK(sc); return (ret); } /* XXX TODO: tag strip/unstrip, double-tag, etc */ AR40XX_UNLOCK(sc); /* Don't change media config on CPU port */ if (p->es_port == 0) return (0); mii = ar40xx_phy_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = ar40xx_phy_ifpforport(sc, p->es_port); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); return (0); } /* * Get the current VLAN group (per-port, ISL, dot1q) configuration. * * For now the only supported operating mode is dot1q. 
*/ static int ar40xx_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct ar40xx_softc *sc = device_get_softc(dev); int vid, ret; if (vg->es_vlangroup > sc->sc_info.es_nvlangroups) return (EINVAL); vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_fid = 0; AR40XX_LOCK(sc); /* Note: only supporting 802.1q VLAN config for now */ if (sc->sc_vlan.vlan != 1) { vg->es_member_ports = 0; vg->es_untagged_ports = 0; AR40XX_UNLOCK(sc); return (-1); } /* Get vlangroup mapping to VLAN id */ vid = sc->sc_vlan.vlan_id[vg->es_vlangroup]; if ((vid & ETHERSWITCH_VID_VALID) == 0) { /* Not an active vgroup; bail */ AR40XX_UNLOCK(sc); return (0); } vg->es_vid = vid; ret = ar40xx_hw_vtu_get_vlan(sc, vid, &vg->es_member_ports, &vg->es_untagged_ports); AR40XX_UNLOCK(sc); if (ret == 0) { vg->es_vid |= ETHERSWITCH_VID_VALID; } return (ret); } /* * Set the current VLAN group (per-port, ISL, dot1q) configuration. * * For now the only supported operating mode is dot1q. */ static int ar40xx_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct ar40xx_softc *sc = device_get_softc(dev); int err, vid; /* For now we only support 802.1q mode */ if (sc->sc_vlan.vlan == 0) return (EINVAL); AR40XX_LOCK(sc); vid = sc->sc_vlan.vlan_id[vg->es_vlangroup]; /* * If we have an 802.1q VID and it's different to the current one, * purge the current VTU entry. 
*/ if ((vid != 0) && ((vid & ETHERSWITCH_VID_VALID) != 0) && ((vid & ETHERSWITCH_VID_MASK) != (vg->es_vid & ETHERSWITCH_VID_MASK))) { AR40XX_DPRINTF(sc, AR40XX_DBG_VTU_OP, "%s: purging VID %d first\n", __func__, vid); err = ar40xx_hw_vtu_flush(sc); if (err != 0) { AR40XX_UNLOCK(sc); return (err); } } /* Update VLAN ID */ vid = vg->es_vid & ETHERSWITCH_VID_MASK; sc->sc_vlan.vlan_id[vg->es_vlangroup] = vid; if (vid == 0) { /* Setting it to 0 disables the group */ AR40XX_UNLOCK(sc); return (0); } /* Add valid bit for this entry */ sc->sc_vlan.vlan_id[vg->es_vlangroup] = vid | ETHERSWITCH_VID_VALID; /* Update hardware */ err = ar40xx_hw_vtu_load_vlan(sc, vid, vg->es_member_ports, vg->es_untagged_ports); if (err != 0) { AR40XX_UNLOCK(sc); return (err); } /* Update the config for the given entry */ sc->sc_vlan.vlan_ports[vg->es_vlangroup] = vg->es_member_ports; sc->sc_vlan.vlan_untagged[vg->es_vlangroup] = vg->es_untagged_ports; AR40XX_UNLOCK(sc); return (0); } /* * Get the current configuration mode. */ static int ar40xx_getconf(device_t dev, etherswitch_conf_t *conf) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); /* Only support dot1q VLAN for now */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; /* Switch MAC address */ ret = ar40xx_hw_read_switch_mac_address(sc, &conf->switch_macaddr); if (ret == 0) conf->cmd |= ETHERSWITCH_CONF_SWITCH_MACADDR; AR40XX_UNLOCK(sc); return (0); } /* * Set the current configuration and do a switch reset. * * For now the only supported operating mode is dot1q, don't * allow it to be set to non-dot1q. 
*/ static int ar40xx_setconf(device_t dev, etherswitch_conf_t *conf) { struct ar40xx_softc *sc = device_get_softc(dev); int ret = 0; if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { /* Only support dot1q VLAN for now */ if (conf->vlan_mode != ETHERSWITCH_VLAN_DOT1Q) return (EINVAL); } if (conf->cmd & ETHERSWITCH_CONF_SWITCH_MACADDR) { AR40XX_LOCK(sc); ret = ar40xx_hw_read_switch_mac_address(sc, &conf->switch_macaddr); AR40XX_UNLOCK(sc); } return (ret); } /* * Flush all ATU entries. */ static int ar40xx_atu_flush_all(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); ret = ar40xx_hw_atu_flush_all(sc); AR40XX_UNLOCK(sc); return (ret); } /* * Flush all ATU entries for the given port. */ static int ar40xx_atu_flush_port(device_t dev, int port) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); ret = ar40xx_hw_atu_flush_port(sc, port); AR40XX_UNLOCK(sc); return (ret); } /* * Load the ATU table into local storage so it can be iterated * over. */ static int ar40xx_atu_fetch_table(device_t dev, etherswitch_atu_table_t *table) { struct ar40xx_softc *sc = device_get_softc(dev); int err, nitems; memset(&sc->atu.entries, 0, sizeof(sc->atu.entries)); table->es_nitems = 0; nitems = 0; AR40XX_LOCK(sc); sc->atu.count = 0; err = ar40xx_hw_atu_fetch_entry(sc, NULL, 0); if (err != 0) goto done; while (nitems < AR40XX_NUM_ATU_ENTRIES) { err = ar40xx_hw_atu_fetch_entry(sc, &sc->atu.entries[nitems], 1); if (err != 0) goto done; sc->atu.entries[nitems].id = nitems; nitems++; } done: sc->atu.count = nitems; table->es_nitems = nitems; AR40XX_UNLOCK(sc); return (0); } /* * Iterate over the ATU table entries that have been previously * fetched. 
*/ static int ar40xx_atu_fetch_table_entry(device_t dev, etherswitch_atu_entry_t *e) { struct ar40xx_softc *sc = device_get_softc(dev); int id, err = 0; id = e->id; AR40XX_LOCK(sc); if (id > sc->atu.count) { err = ENOENT; goto done; } memcpy(e, &sc->atu.entries[id], sizeof(*e)); done: AR40XX_UNLOCK(sc); return (err); } static device_method_t ar40xx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar40xx_probe), DEVMETHOD(device_attach, ar40xx_attach), DEVMETHOD(device_detach, ar40xx_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ar40xx_readphy), DEVMETHOD(miibus_writereg, ar40xx_writephy), DEVMETHOD(miibus_statchg, ar40xx_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, ar40xx_readphy), DEVMETHOD(mdio_writereg, ar40xx_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ar40xx_lock), DEVMETHOD(etherswitch_unlock, ar40xx_unlock), DEVMETHOD(etherswitch_getinfo, ar40xx_getinfo), DEVMETHOD(etherswitch_readreg, ar40xx_readreg), DEVMETHOD(etherswitch_writereg, ar40xx_writereg), DEVMETHOD(etherswitch_readphyreg, ar40xx_readphy), DEVMETHOD(etherswitch_writephyreg, ar40xx_writephy), DEVMETHOD(etherswitch_getport, ar40xx_getport), DEVMETHOD(etherswitch_setport, ar40xx_setport), DEVMETHOD(etherswitch_getvgroup, ar40xx_getvgroup), DEVMETHOD(etherswitch_setvgroup, ar40xx_setvgroup), DEVMETHOD(etherswitch_getconf, ar40xx_getconf), DEVMETHOD(etherswitch_setconf, ar40xx_setconf), DEVMETHOD(etherswitch_flush_all, ar40xx_atu_flush_all), DEVMETHOD(etherswitch_flush_port, ar40xx_atu_flush_port), DEVMETHOD(etherswitch_fetch_table, ar40xx_atu_fetch_table), DEVMETHOD(etherswitch_fetch_table_entry, ar40xx_atu_fetch_table_entry), DEVMETHOD_END }; DEFINE_CLASS_0(ar40xx, ar40xx_driver, ar40xx_methods, sizeof(struct ar40xx_softc)); DRIVER_MODULE(ar40xx, simplebus, ar40xx_driver, 0, 0); DRIVER_MODULE(ar40xx, ofwbus, ar40xx_driver, 0, 0); DRIVER_MODULE(miibus, ar40xx, miibus_driver, 
0, 0); DRIVER_MODULE(mdio, ar40xx, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, ar40xx, etherswitch_driver, 0, 0); MODULE_DEPEND(ar40xx, mdio, 1, 1, 1); MODULE_DEPEND(ar40xx, miibus, 1, 1, 1); MODULE_DEPEND(ar40xx, etherswitch, 1, 1, 1); MODULE_VERSION(ar40xx, 1); diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c index 3e2bcd4af18d..079a92983503 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c +++ b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c @@ -1,252 +1,252 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" int ar40xx_phy_tick(struct ar40xx_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int phy; uint32_t reg; AR40XX_LOCK_ASSERT(sc); AR40XX_REG_BARRIER_READ(sc); /* * Loop over; update phy port status here */ for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) { /* * Port here is PHY, not port! */ reg = AR40XX_REG_READ(sc, AR40XX_REG_PORT_STATUS(phy + 1)); mii = device_get_softc(sc->sc_phys.miibus[phy]); /* * Compare the current link status to the previous link * status. We may need to clear ATU / change phy config. */ if (((reg & AR40XX_PORT_STATUS_LINK_UP) != 0) && (mii->mii_media_status & IFM_ACTIVE) == 0) { AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: PHY %d: down -> up\n", __func__, phy); ar40xx_hw_port_link_up(sc, phy + 1); ar40xx_hw_atu_flush_port(sc, phy + 1); } if (((reg & AR40XX_PORT_STATUS_LINK_UP) == 0) && (mii->mii_media_status & IFM_ACTIVE) != 0) { AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: PHY %d: up -> down\n", __func__, phy); ar40xx_hw_port_link_down(sc, phy + 1); ar40xx_hw_atu_flush_port(sc, phy + 1); } mii_tick(mii); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } return (0); } static inline int ar40xx_portforphy(int phy) { return (phy+1); } struct mii_data * ar40xx_phy_miiforport(struct ar40xx_softc *sc, int port) { int phy; phy = port-1; if (phy < 0 || phy >= AR40XX_NUM_PHYS) return (NULL); return (device_get_softc(sc->sc_phys.miibus[phy])); } if_t ar40xx_phy_ifpforport(struct 
ar40xx_softc *sc, int port) { int phy; phy = port-1; if (phy < 0 || phy >= AR40XX_NUM_PHYS) return (NULL); return (sc->sc_phys.ifp[phy]); } static int ar40xx_ifmedia_upd(if_t ifp) { struct ar40xx_softc *sc = if_getsoftc(ifp); struct mii_data *mii = ar40xx_phy_miiforport(sc, if_getdunit(ifp)); AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: called, PHY %d\n", __func__, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void ar40xx_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct ar40xx_softc *sc = if_getsoftc(ifp); struct mii_data *mii = ar40xx_phy_miiforport(sc, if_getdunit(ifp)); AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s: called, PHY %d\n", __func__, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } int ar40xx_attach_phys(struct ar40xx_softc *sc) { int phy, err = 0; char name[IFNAMSIZ]; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) { sc->sc_phys.ifp[phy] = if_alloc(IFT_ETHER); if (sc->sc_phys.ifp[phy] == NULL) { device_printf(sc->sc_dev, "PHY %d: couldn't allocate ifnet structure\n", phy); err = ENOMEM; break; } sc->sc_phys.ifp[phy]->if_softc = sc; sc->sc_phys.ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; sc->sc_phys.ifname[phy] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK); bcopy(name, sc->sc_phys.ifname[phy], strlen(name)+1); if_initname(sc->sc_phys.ifp[phy], sc->sc_phys.ifname[phy], ar40xx_portforphy(phy)); err = mii_attach(sc->sc_dev, &sc->sc_phys.miibus[phy], sc->sc_phys.ifp[phy], ar40xx_ifmedia_upd, ar40xx_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); device_printf(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(sc->sc_phys.miibus[phy]), sc->sc_phys.ifp[phy]->if_xname); if (err != 0) { device_printf(sc->sc_dev, "attaching 
PHY %d failed\n", phy); return (err); } } return (0); } int ar40xx_hw_phy_get_ids(struct ar40xx_softc *sc) { int phy; uint32_t id1, id2; for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) { id1 = MDIO_READREG(sc->sc_mdio_dev, phy, 2); id2 = MDIO_READREG(sc->sc_mdio_dev, phy, 3); device_printf(sc->sc_dev, "%s: PHY %d: ID1=0x%04x, ID2=0x%04x\n", __func__, phy, id1, id2); } return (0); } diff --git a/sys/dev/firmware/arm/scmi.c b/sys/dev/firmware/arm/scmi.c index 455a802f254d..3101be245ee1 100644 --- a/sys/dev/firmware/arm/scmi.c +++ b/sys/dev/firmware/arm/scmi.c @@ -1,270 +1,270 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Ruslan Bukin * * This work was supported by Innovate UK project 105694, "Digital Security * by Design (DSbD) Technology Platform Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "dev/mailbox/arm/arm_doorbell.h" #include "scmi.h" #include "scmi_protocols.h" struct scmi_softc { struct simplebus_softc simplebus_sc; device_t dev; device_t tx_shmem; struct arm_doorbell *db; struct mtx mtx; int req_done; }; static device_t scmi_get_shmem(struct scmi_softc *sc, int index) { phandle_t *shmems; phandle_t node; device_t dev; size_t len; node = ofw_bus_get_node(sc->dev); if (node <= 0) return (NULL); len = OF_getencprop_alloc_multi(node, "shmem", sizeof(*shmems), (void **)&shmems); if (len <= 0) { device_printf(sc->dev, "%s: Can't get shmem node.\n", __func__); return (NULL); } if (index >= len) { OF_prop_free(shmems); return (NULL); } dev = OF_device_from_xref(shmems[index]); if (dev == NULL) device_printf(sc->dev, "%s: Can't get shmem device.\n", __func__); OF_prop_free(shmems); return (dev); } static void scmi_callback(void *arg) { struct scmi_softc *sc; sc = arg; dprintf("%s sc %p\n", __func__, sc); SCMI_LOCK(sc); sc->req_done = 1; wakeup(sc); SCMI_UNLOCK(sc); } static int scmi_request_locked(struct scmi_softc *sc, struct scmi_req *req) { struct scmi_smt_header hdr; int timeout; bzero(&hdr, sizeof(struct scmi_smt_header)); SCMI_ASSERT_LOCKED(sc); /* Read header */ scmi_shmem_read(sc->tx_shmem, 0, &hdr, SMT_HEADER_SIZE); if ((hdr.channel_status & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) == 0) return (1); /* Update 
header */ hdr.channel_status &= ~SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE; hdr.msg_header = req->protocol_id << SMT_HEADER_PROTOCOL_ID_S; hdr.msg_header |= req->message_id << SMT_HEADER_MESSAGE_ID_S; hdr.length = sizeof(hdr.msg_header) + req->in_size; hdr.flags |= SCMI_SHMEM_FLAG_INTR_ENABLED; /* Write header */ scmi_shmem_write(sc->tx_shmem, 0, &hdr, SMT_HEADER_SIZE); /* Write request */ scmi_shmem_write(sc->tx_shmem, SMT_HEADER_SIZE, req->in_buf, req->in_size); sc->req_done = 0; /* Interrupt SCP firmware. */ arm_doorbell_set(sc->db); timeout = 200; dprintf("%s: request\n", __func__); do { if (cold) { if (arm_doorbell_get(sc->db)) break; DELAY(10000); } else { msleep(sc, &sc->mtx, 0, "scmi", hz / 10); if (sc->req_done) break; } } while (timeout--); if (timeout <= 0) return (-1); dprintf("%s: got reply, timeout %d\n", __func__, timeout); /* Read header. */ scmi_shmem_read(sc->tx_shmem, 0, &hdr, SMT_HEADER_SIZE); /* Read response */ scmi_shmem_read(sc->tx_shmem, SMT_HEADER_SIZE, req->out_buf, req->out_size); return (0); } int scmi_request(device_t dev, struct scmi_req *req) { struct scmi_softc *sc; int error; sc = device_get_softc(dev); SCMI_LOCK(sc); error = scmi_request_locked(sc, req); SCMI_UNLOCK(sc); return (error); } static int scmi_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "arm,scmi")) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); device_set_desc(dev, "ARM SCMI interface driver"); return (BUS_PROBE_DEFAULT); } static int scmi_attach(device_t dev) { struct scmi_softc *sc; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (node == -1) return (ENXIO); sc->tx_shmem = scmi_get_shmem(sc, 0); if (sc->tx_shmem == NULL) { device_printf(dev, "TX shmem dev not found.\n"); return (ENXIO); } sc->db = arm_doorbell_ofw_get(sc->dev, "tx"); if (sc->db == NULL) { device_printf(dev, "Doorbell device not found.\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), "SCMI", MTX_DEF); 
arm_doorbell_set_handler(sc->db, scmi_callback, sc); simplebus_init(dev, node); /* * Allow devices to identify. */ bus_generic_probe(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); error = bus_generic_attach(dev); return (error); } static int scmi_detach(device_t dev) { return (0); } static device_method_t scmi_methods[] = { DEVMETHOD(device_probe, scmi_probe), DEVMETHOD(device_attach, scmi_attach), DEVMETHOD(device_detach, scmi_detach), DEVMETHOD_END }; DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc), simplebus_driver); DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0); MODULE_VERSION(scmi, 1); diff --git a/sys/dev/firmware/arm/scmi_clk.c b/sys/dev/firmware/arm/scmi_clk.c index fbf65f0fee74..da7a8d37b465 100644 --- a/sys/dev/firmware/arm/scmi_clk.c +++ b/sys/dev/firmware/arm/scmi_clk.c @@ -1,431 +1,431 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Ruslan Bukin * * This work was supported by Innovate UK project 105694, "Digital Security * by Design (DSbD) Technology Platform Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include #include #include #include "scmi.h" #include "scmi_protocols.h" #include "scmi_clk.h" struct scmi_clk_softc { device_t dev; device_t scmi; struct clkdom *clkdom; }; struct scmi_clknode_softc { device_t dev; int clock_id; }; static int __unused scmi_clk_get_rate(struct scmi_clk_softc *sc, int clk_id, uint64_t *rate) { struct scmi_clk_rate_get_out out; struct scmi_clk_rate_get_in in; struct scmi_req req; int error; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_CLOCK_RATE_GET; req.in_buf = ∈ req.in_size = sizeof(struct scmi_clk_rate_get_in); req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_rate_get_out); in.clock_id = clk_id; error = scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); *rate = out.rate_lsb | ((uint64_t)out.rate_msb << 32); return (0); } static int scmi_clk_set_rate(struct scmi_clk_softc *sc, int clk_id, uint64_t rate) { struct scmi_clk_rate_set_out out; struct scmi_clk_rate_set_in in; struct scmi_req req; int error; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_CLOCK_RATE_SET; req.in_buf = ∈ req.in_size = sizeof(struct scmi_clk_rate_set_in); req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_rate_set_out); in.clock_id = clk_id; in.flags = SCMI_CLK_RATE_ROUND_CLOSEST; in.rate_lsb = (uint32_t)rate; in.rate_msb = (uint32_t)(rate >> 32); error = 
scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); return (0); } static int __unused scmi_clk_gate(struct scmi_clk_softc *sc, int clk_id, int enable) { struct scmi_clk_state_out out; struct scmi_clk_state_in in; struct scmi_req req; int error; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_CLOCK_CONFIG_SET; req.in_buf = ∈ req.in_size = sizeof(struct scmi_clk_state_in); req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_state_out); in.clock_id = clk_id; in.attributes = enable; error = scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); return (0); } static int scmi_clknode_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int scmi_clknode_recalc_freq(struct clknode *clk, uint64_t *freq) { return (0); } static int scmi_clknode_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct scmi_clknode_softc *clk_sc; struct scmi_clk_softc *sc; clk_sc = clknode_get_softc(clk); sc = device_get_softc(clk_sc->dev); dprintf("%s: %ld\n", __func__, *fout); scmi_clk_set_rate(sc, clk_sc->clock_id, *fout); *stop = 1; return (0); } static clknode_method_t scmi_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, scmi_clknode_init), CLKNODEMETHOD(clknode_recalc_freq, scmi_clknode_recalc_freq), CLKNODEMETHOD(clknode_set_freq, scmi_clknode_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(scmi_clknode, scmi_clknode_class, scmi_clknode_methods, sizeof(struct scmi_clknode_softc), clknode_class); static int scmi_clk_add_node(struct scmi_clk_softc *sc, int index, char *clock_name) { struct scmi_clknode_softc *clk_sc; struct clknode_init_def def; struct clknode *clk; memset(&def, 0, sizeof(def)); def.id = index; def.name = clock_name; def.parent_names = NULL; def.parent_cnt = 0; clk = clknode_create(sc->clkdom, &scmi_clknode_class, &def); if (clk == NULL) { device_printf(sc->dev, 
"Cannot create clknode.\n"); return (ENXIO); } clk_sc = clknode_get_softc(clk); clk_sc->dev = sc->dev; clk_sc->clock_id = index; if (clknode_register(sc->clkdom, clk) == NULL) { device_printf(sc->dev, "Could not register clock '%s'.\n", def.name); return (ENXIO); } device_printf(sc->dev, "Clock '%s' registered.\n", def.name); return (0); } static int scmi_clk_get_name(struct scmi_clk_softc *sc, int index, char **result) { struct scmi_clk_name_get_out out; struct scmi_clk_name_get_in in; struct scmi_req req; char *clock_name; int error; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_CLOCK_NAME_GET; req.in_buf = ∈ req.in_size = sizeof(struct scmi_clk_name_get_in); req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_name_get_out); in.clock_id = index; error = scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); clock_name = malloc(sizeof(out.name), M_DEVBUF, M_WAITOK); strncpy(clock_name, out.name, sizeof(out.name)); *result = clock_name; return (0); } static int scmi_clk_attrs(struct scmi_clk_softc *sc, int index) { struct scmi_clk_attrs_out out; struct scmi_clk_attrs_in in; struct scmi_req req; int error; char *clock_name; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_CLOCK_ATTRIBUTES; req.in_buf = ∈ req.in_size = sizeof(struct scmi_clk_attrs_in); req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_attrs_out); in.clock_id = index; error = scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); if (out.attributes & CLK_ATTRS_EXT_CLK_NAME) { error = scmi_clk_get_name(sc, index, &clock_name); if (error) return (error); } else { clock_name = malloc(sizeof(out.clock_name), M_DEVBUF, M_WAITOK); strncpy(clock_name, out.clock_name, sizeof(out.clock_name)); } error = scmi_clk_add_node(sc, index, clock_name); return (error); } static int scmi_clk_discover(struct scmi_clk_softc *sc) { struct scmi_clk_protocol_attrs_out out; struct scmi_req 
req; int nclocks; int failing; int error; int i; req.protocol_id = SCMI_PROTOCOL_ID_CLOCK; req.message_id = SCMI_PROTOCOL_ATTRIBUTES; req.in_buf = NULL; req.in_size = 0; req.out_buf = &out; req.out_size = sizeof(struct scmi_clk_protocol_attrs_out); error = scmi_request(sc->scmi, &req); if (error != 0) return (error); if (out.status != 0) return (ENXIO); nclocks = (out.attributes & CLK_ATTRS_NCLOCKS_M) >> CLK_ATTRS_NCLOCKS_S; device_printf(sc->dev, "Found %d clocks.\n", nclocks); failing = 0; for (i = 0; i < nclocks; i++) { error = scmi_clk_attrs(sc, i); if (error) { device_printf(sc->dev, "Could not process clock index %d.\n", i); failing++; } } if (failing == nclocks) return (ENXIO); return (0); } static int scmi_clk_init(struct scmi_clk_softc *sc) { int error; /* Create clock domain */ sc->clkdom = clkdom_create(sc->dev); if (sc->clkdom == NULL) return (ENXIO); error = scmi_clk_discover(sc); if (error) { device_printf(sc->dev, "Could not discover clocks.\n"); return (ENXIO); } error = clkdom_finit(sc->clkdom); if (error) { device_printf(sc->dev, "Failed to init clock domain.\n"); return (ENXIO); } return (0); } static int scmi_clk_probe(device_t dev) { phandle_t node; uint32_t reg; int error; node = ofw_bus_get_node(dev); error = OF_getencprop(node, "reg", ®, sizeof(uint32_t)); if (error < 0) return (ENXIO); if (reg != SCMI_PROTOCOL_ID_CLOCK) return (ENXIO); device_set_desc(dev, "SCMI Clock Management Unit"); return (BUS_PROBE_DEFAULT); } static int scmi_clk_attach(device_t dev) { struct scmi_clk_softc *sc; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; sc->scmi = device_get_parent(dev); node = ofw_bus_get_node(sc->dev); OF_device_register_xref(OF_xref_from_node(node), sc->dev); scmi_clk_init(sc); return (0); } static int scmi_clk_detach(device_t dev) { return (0); } static device_method_t scmi_clk_methods[] = { /* Device interface */ DEVMETHOD(device_probe, scmi_clk_probe), DEVMETHOD(device_attach, scmi_clk_attach), DEVMETHOD(device_detach, 
scmi_clk_detach), DEVMETHOD_END }; static driver_t scmi_clk_driver = { "scmi_clk", scmi_clk_methods, sizeof(struct scmi_clk_softc), }; EARLY_DRIVER_MODULE(scmi_clk, scmi, scmi_clk_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(scmi_clk, 1); diff --git a/sys/dev/flash/flexspi/flex_spi.c b/sys/dev/flash/flexspi/flex_spi.c index 106a7845b2ad..766a1cfaa332 100644 --- a/sys/dev/flash/flexspi/flex_spi.c +++ b/sys/dev/flash/flexspi/flex_spi.c @@ -1,989 +1,989 @@ /*- * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "flex_spi.h" static MALLOC_DEFINE(SECTOR_BUFFER, "flex_spi", "FSL QSPI sector buffer memory"); #define AHB_LUT_ID 31 #define MHZ(x) ((x)*1000*1000) #define SPI_DEFAULT_CLK_RATE (MHZ(10)) static int driver_flags = 0; SYSCTL_NODE(_hw, OID_AUTO, flex_spi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "FlexSPI driver parameters"); SYSCTL_INT(_hw_flex_spi, OID_AUTO, driver_flags, CTLFLAG_RDTUN, &driver_flags, 0, "Configuration flags and quirks"); static struct ofw_compat_data flex_spi_compat_data[] = { {"nxp,lx2160a-fspi", true}, {NULL, false} }; struct flex_spi_flash_info { char* name; uint32_t jedecid; uint32_t sectorsize; uint32_t sectorcount; uint32_t erasesize; uint32_t maxclk; }; /* Add information about supported Flashes. TODO: use SFDP instead */ static struct flex_spi_flash_info flex_spi_flash_info[] = { {"W25Q128JW", 0x001860ef, 64*1024, 256, 4096, MHZ(100)}, {NULL, 0, 0, 0, 0, 0} }; struct flex_spi_softc { device_t dev; unsigned int flags; struct bio_queue_head bio_queue; struct mtx disk_mtx; struct disk *disk; struct proc *p; unsigned int taskstate; uint8_t *buf; struct resource *ahb_mem_res; struct resource *mem_res; clk_t fspi_clk_en; clk_t fspi_clk; uint64_t fspi_clk_en_hz; uint64_t fspi_clk_hz; /* TODO: support more than one Flash per bus */ uint64_t fspi_max_clk; uint32_t quirks; /* Flash parameters */ uint32_t sectorsize; uint32_t sectorcount; uint32_t erasesize; }; static int flex_spi_read(struct flex_spi_softc *sc, off_t offset, caddr_t data, size_t count); static int flex_spi_write(struct flex_spi_softc *sc, off_t offset, uint8_t *data, size_t size); static int flex_spi_attach(device_t dev); static int flex_spi_probe(device_t dev); static int flex_spi_detach(device_t dev); /* disk routines */ static int flex_spi_open(struct disk *dp); static int 
flex_spi_close(struct disk *dp); static int flex_spi_ioctl(struct disk *, u_long, void *, int, struct thread *); static void flex_spi_strategy(struct bio *bp); static int flex_spi_getattr(struct bio *bp); static void flex_spi_task(void *arg); static uint32_t read_reg(struct flex_spi_softc *sc, uint32_t offset) { return ((bus_read_4(sc->mem_res, offset))); } static void write_reg(struct flex_spi_softc *sc, uint32_t offset, uint32_t value) { bus_write_4(sc->mem_res, offset, (value)); } static int reg_read_poll_tout(struct flex_spi_softc *sc, uint32_t offset, uint32_t mask, uint32_t delay_us, uint32_t iterations, bool positive) { uint32_t reg; uint32_t condition = 0; do { reg = read_reg(sc, offset); if (positive) condition = ((reg & mask) == 0); else condition = ((reg & mask) != 0); if (condition == 0) break; DELAY(delay_us); } while (condition && (--iterations > 0)); return (condition != 0); } static int flex_spi_clk_setup(struct flex_spi_softc *sc, uint32_t rate) { int ret = 0; /* disable to avoid glitching */ ret |= clk_disable(sc->fspi_clk_en); ret |= clk_disable(sc->fspi_clk); ret |= clk_set_freq(sc->fspi_clk, rate, 0); sc->fspi_clk_hz = rate; /* enable clocks back */ ret |= clk_enable(sc->fspi_clk_en); ret |= clk_enable(sc->fspi_clk); if (ret) return (EINVAL); return (0); } static void flex_spi_prepare_lut(struct flex_spi_softc *sc, uint8_t op) { uint32_t lut_id; uint32_t lut; /* unlock LUT */ write_reg(sc, FSPI_LUTKEY, FSPI_LUTKEY_VALUE); write_reg(sc, FSPI_LCKCR, FSPI_LCKER_UNLOCK); /* Read JEDEC ID */ lut_id = 0; switch (op) { case LUT_FLASH_CMD_JEDECID: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_READ_IDENT); lut |= LUT_DEF(1, LUT_NXP_READ, LUT_PAD(1), 0); write_reg(sc, FSPI_LUT_REG(lut_id), lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, 0); break; case LUT_FLASH_CMD_READ: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_FAST_READ); lut |= LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 3*8); write_reg(sc, FSPI_LUT_REG(lut_id), lut); lut = LUT_DEF(0, LUT_DUMMY, LUT_PAD(1), 
1*8); lut |= LUT_DEF(1, LUT_NXP_READ, LUT_PAD(1), 0); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 8, 0); break; case LUT_FLASH_CMD_STATUS_READ: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_READ_STATUS); lut |= LUT_DEF(1, LUT_NXP_READ, LUT_PAD(1), 0); write_reg(sc, FSPI_LUT_REG(lut_id), lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, 0); break; case LUT_FLASH_CMD_PAGE_PROGRAM: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_PAGE_PROGRAM); lut |= LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 3*8); write_reg(sc, FSPI_LUT_REG(lut_id), lut); lut = LUT_DEF(0, LUT_NXP_WRITE, LUT_PAD(1), 0); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 8, 0); break; case LUT_FLASH_CMD_WRITE_ENABLE: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_WRITE_ENABLE); write_reg(sc, FSPI_LUT_REG(lut_id), lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, 0); break; case LUT_FLASH_CMD_WRITE_DISABLE: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_WRITE_DISABLE); write_reg(sc, FSPI_LUT_REG(lut_id), lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, 0); break; case LUT_FLASH_CMD_SECTOR_ERASE: lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_SECTOR_ERASE); lut |= LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 3*8); write_reg(sc, FSPI_LUT_REG(lut_id), lut); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, 0); break; default: write_reg(sc, FSPI_LUT_REG(lut_id), 0); } /* lock LUT */ write_reg(sc, FSPI_LUTKEY, FSPI_LUTKEY_VALUE); write_reg(sc, FSPI_LCKCR, FSPI_LCKER_LOCK); } static void flex_spi_prepare_ahb_lut(struct flex_spi_softc *sc) { uint32_t lut_id; uint32_t lut; /* unlock LUT */ write_reg(sc, FSPI_LUTKEY, FSPI_LUTKEY_VALUE); write_reg(sc, FSPI_LCKCR, FSPI_LCKER_UNLOCK); lut_id = AHB_LUT_ID; lut = LUT_DEF(0, LUT_CMD, LUT_PAD(1), FSPI_CMD_FAST_READ); lut |= LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 3*8); write_reg(sc, FSPI_LUT_REG(lut_id), lut); lut = LUT_DEF(0, LUT_DUMMY, LUT_PAD(1), 1*8); lut |= LUT_DEF(1, LUT_NXP_READ, LUT_PAD(1), 0); write_reg(sc, FSPI_LUT_REG(lut_id) + 4, lut); 
write_reg(sc, FSPI_LUT_REG(lut_id) + 8, 0); /* lock LUT */ write_reg(sc, FSPI_LUTKEY, FSPI_LUTKEY_VALUE); write_reg(sc, FSPI_LCKCR, FSPI_LCKER_LOCK); } #define DIR_READ 0 #define DIR_WRITE 1 static void flex_spi_read_rxfifo(struct flex_spi_softc *sc, uint8_t *buf, uint8_t size) { int i, ret, reg; /* * Default value of water mark level is 8 bytes, hence in single * read request controller can read max 8 bytes of data. */ for (i = 0; i < size; i += 4) { /* Wait for RXFIFO available */ if (i % 8 == 0) { ret = reg_read_poll_tout(sc, FSPI_INTR, FSPI_INTR_IPRXWA, 1, 50000, 1); if (ret) device_printf(sc->dev, "timed out waiting for FSPI_INTR_IPRXWA\n"); } if (i % 8 == 0) reg = read_reg(sc, FSPI_RFDR); else reg = read_reg(sc, FSPI_RFDR + 4); if (size >= (i + 4)) *(uint32_t *)(buf + i) = reg; else memcpy(buf + i, ®, size - i); /* move the FIFO pointer */ if (i % 8 != 0) write_reg(sc, FSPI_INTR, FSPI_INTR_IPRXWA); } /* invalid the RXFIFO */ write_reg(sc, FSPI_IPRXFCR, FSPI_IPRXFCR_CLR); /* move the FIFO pointer */ write_reg(sc, FSPI_INTR, FSPI_INTR_IPRXWA); } static void flex_spi_write_txfifo(struct flex_spi_softc *sc, uint8_t *buf, uint8_t size) { int i, ret, reg; /* invalid the TXFIFO */ write_reg(sc, FSPI_IPRXFCR, FSPI_IPTXFCR_CLR); /* * Default value of water mark level is 8 bytes, hence in single * read request controller can read max 8 bytes of data. 
*/ for (i = 0; i < size; i += 4) { /* Wait for RXFIFO available */ if (i % 8 == 0) { ret = reg_read_poll_tout(sc, FSPI_INTR, FSPI_INTR_IPTXWE, 1, 50000, 1); if (ret) device_printf(sc->dev, "timed out waiting for FSPI_INTR_IPRXWA\n"); } if (size >= (i + 4)) reg = *(uint32_t *)(buf + i); else { reg = 0; memcpy(®, buf + i, size - i); } if (i % 8 == 0) write_reg(sc, FSPI_TFDR, reg); else write_reg(sc, FSPI_TFDR + 4, reg); /* move the FIFO pointer */ if (i % 8 != 0) write_reg(sc, FSPI_INTR, FSPI_INTR_IPTXWE); } /* move the FIFO pointer */ write_reg(sc, FSPI_INTR, FSPI_INTR_IPTXWE); } static int flex_spi_do_op(struct flex_spi_softc *sc, uint32_t op, uint32_t addr, uint8_t *buf, uint8_t size, uint8_t dir) { uint32_t cnt = 1000, reg; reg = read_reg(sc, FSPI_IPRXFCR); /* invalidate RXFIFO first */ reg &= ~FSPI_IPRXFCR_DMA_EN; reg |= FSPI_IPRXFCR_CLR; write_reg(sc, FSPI_IPRXFCR, reg); /* Prepare LUT */ flex_spi_prepare_lut(sc, op); write_reg(sc, FSPI_IPCR0, addr); /* * Always start the sequence at the same index since we update * the LUT at each BIO operation. And also specify the DATA * length, since it's has not been specified in the LUT. */ write_reg(sc, FSPI_IPCR1, size | (0 << FSPI_IPCR1_SEQID_SHIFT) | (0 << FSPI_IPCR1_SEQNUM_SHIFT)); if ((size != 0) && (dir == DIR_WRITE)) flex_spi_write_txfifo(sc, buf, size); /* Trigger the LUT now. */ write_reg(sc, FSPI_IPCMD, FSPI_IPCMD_TRG); /* Wait for completion. */ do { reg = read_reg(sc, FSPI_INTR); if (reg & FSPI_INTR_IPCMDDONE) { write_reg(sc, FSPI_INTR, FSPI_INTR_IPCMDDONE); break; } DELAY(1); } while (--cnt); if (cnt == 0) { device_printf(sc->dev, "timed out waiting for command completion\n"); return (ETIMEDOUT); } /* Invoke IP data read, if request is of data read. */ if ((size != 0) && (dir == DIR_READ)) flex_spi_read_rxfifo(sc, buf, size); return (0); } static int flex_spi_wait_for_controller(struct flex_spi_softc *sc) { int err; /* Wait for controller being ready. 
*/ err = reg_read_poll_tout(sc, FSPI_STS0, FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, 1); return (err); } static int flex_spi_wait_for_flash(struct flex_spi_softc *sc) { int ret; uint32_t status = 0; ret = flex_spi_wait_for_controller(sc); if (ret != 0) { device_printf(sc->dev, "%s: timed out waiting for controller", __func__); return (ret); } do { ret = flex_spi_do_op(sc, LUT_FLASH_CMD_STATUS_READ, 0, (void*)&status, 1, DIR_READ); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to get flash status\n"); return (ret); } } while (status & STATUS_WIP); return (0); } static int flex_spi_identify(struct flex_spi_softc *sc) { int ret; uint32_t id = 0; struct flex_spi_flash_info *finfo = flex_spi_flash_info; ret = flex_spi_do_op(sc, LUT_FLASH_CMD_JEDECID, 0, (void*)&id, sizeof(id), DIR_READ); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to identify device\n"); return (ret); } /* XXX TODO: SFDP to be implemented */ while (finfo->jedecid != 0) { if (id == finfo->jedecid) { device_printf(sc->dev, "found %s Flash\n", finfo->name); sc->sectorsize = finfo->sectorsize; sc->sectorcount = finfo->sectorcount; sc->erasesize = finfo->erasesize; sc->fspi_max_clk = finfo->maxclk; return (0); } finfo++; } return (EINVAL); } static inline int flex_spi_force_ip_mode(struct flex_spi_softc *sc) { if (sc->quirks & FSPI_QUIRK_USE_IP_ONLY) return (1); if (driver_flags & FSPI_QUIRK_USE_IP_ONLY) return (1); return (0); } static int flex_spi_read(struct flex_spi_softc *sc, off_t offset, caddr_t data, size_t count) { int err; size_t len; /* Wait for controller being ready. 
*/ err = flex_spi_wait_for_controller(sc); if (err) device_printf(sc->dev, "warning: spi_read, timed out waiting for controller"); /* Use AHB access whenever we can */ if (flex_spi_force_ip_mode(sc) != 0) { do { if (((offset % 4) != 0) || (count < 4)) { *(uint8_t*)data = bus_read_1(sc->ahb_mem_res, offset); data++; count--; offset++; } else { *(uint32_t*)data = bus_read_4(sc->ahb_mem_res, offset); data += 4; count -= 4; offset += 4; } } while (count); return (0); } do { len = min(64, count); err = flex_spi_do_op(sc, LUT_FLASH_CMD_READ, offset, (void*)data, len, DIR_READ); if (err) return (err); offset += len; data += len; count -= len; } while (count); return (0); } static int flex_spi_write(struct flex_spi_softc *sc, off_t offset, uint8_t *data, size_t size) { int ret = 0; size_t ptr; flex_spi_wait_for_flash(sc); ret = flex_spi_do_op(sc, LUT_FLASH_CMD_WRITE_ENABLE, offset, NULL, 0, DIR_READ); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to enable writes\n"); return (ret); } flex_spi_wait_for_flash(sc); /* per-sector write */ while (size > 0) { uint32_t sector_base = rounddown2(offset, sc->erasesize); size_t size_in_sector = size; if (size_in_sector + offset > sector_base + sc->erasesize) size_in_sector = sector_base + sc->erasesize - offset; /* Read sector */ ret = flex_spi_read(sc, sector_base, sc->buf, sc->erasesize); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to read sector %d\n", sector_base); goto exit; } /* Erase sector */ flex_spi_wait_for_flash(sc); ret = flex_spi_do_op(sc, LUT_FLASH_CMD_SECTOR_ERASE, offset, NULL, 0, DIR_READ); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to erase sector %d\n", sector_base); goto exit; } /* Update buffer with input data */ memcpy(sc->buf + (offset - sector_base), data, size_in_sector); /* Write buffer back to the flash * Up to 32 bytes per single request, request cannot spread * across 256-byte page boundary */ for (ptr = 0; ptr < sc->erasesize; ptr += 32) { flex_spi_wait_for_flash(sc); ret = 
flex_spi_do_op(sc, LUT_FLASH_CMD_PAGE_PROGRAM, sector_base + ptr, (void*)(sc->buf + ptr), 32, DIR_WRITE); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to write address %ld\n", sector_base + ptr); goto exit; } } /* update pointers */ size = size - size_in_sector; offset = offset + size; } flex_spi_wait_for_flash(sc); ret = flex_spi_do_op(sc, LUT_FLASH_CMD_WRITE_DISABLE, offset, (void*)sc->buf, 0, DIR_READ); if (ret != 0) { device_printf(sc->dev, "ERROR: failed to disable writes\n"); goto exit; } flex_spi_wait_for_flash(sc); exit: return (ret); } static int flex_spi_default_setup(struct flex_spi_softc *sc) { int ret, i; uint32_t reg; /* Default clock speed */ ret = flex_spi_clk_setup(sc, SPI_DEFAULT_CLK_RATE); if (ret) return (ret); /* Reset the module */ /* w1c register, wait unit clear */ reg = read_reg(sc, FSPI_MCR0); reg |= FSPI_MCR0_SWRST; write_reg(sc, FSPI_MCR0, reg); ret = reg_read_poll_tout(sc, FSPI_MCR0, FSPI_MCR0_SWRST, 1000, POLL_TOUT, 0); if (ret != 0) { device_printf(sc->dev, "time out waiting for reset"); return (ret); } /* Disable the module */ write_reg(sc, FSPI_MCR0, FSPI_MCR0_MDIS); /* Reset the DLL register to default value */ write_reg(sc, FSPI_DLLACR, FSPI_DLLACR_OVRDEN); write_reg(sc, FSPI_DLLBCR, FSPI_DLLBCR_OVRDEN); /* enable module */ write_reg(sc, FSPI_MCR0, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF) | (uint32_t) FSPI_MCR0_OCTCOMB_EN); /* * Disable same device enable bit and configure all slave devices * independently. */ reg = read_reg(sc, FSPI_MCR2); reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN); write_reg(sc, FSPI_MCR2, reg); /* AHB configuration for access buffer 0~7. */ for (i = 0; i < 7; i++) write_reg(sc, FSPI_AHBRX_BUF0CR0 + 4 * i, 0); /* * Set ADATSZ with the maximum AHB buffer size to improve the read * performance. 
*/ write_reg(sc, FSPI_AHBRX_BUF7CR0, (2048 / 8 | FSPI_AHBRXBUF0CR7_PREF)); /* prefetch and no start address alignment limitation */ write_reg(sc, FSPI_AHBCR, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT); /* AHB Read - Set lut sequence ID for all CS. */ flex_spi_prepare_ahb_lut(sc); write_reg(sc, FSPI_FLSHA1CR2, AHB_LUT_ID); write_reg(sc, FSPI_FLSHA2CR2, AHB_LUT_ID); write_reg(sc, FSPI_FLSHB1CR2, AHB_LUT_ID); write_reg(sc, FSPI_FLSHB2CR2, AHB_LUT_ID); /* disable interrupts */ write_reg(sc, FSPI_INTEN, 0); return (0); } static int flex_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, flex_spi_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "NXP FlexSPI Flash"); return (BUS_PROBE_SPECIFIC); } static int flex_spi_attach(device_t dev) { struct flex_spi_softc *sc; phandle_t node; int rid; uint32_t reg; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->disk_mtx, "flex_spi_DISK", "QSPI disk mtx", MTX_DEF); /* Get memory resources. 
*/ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); rid = 1; sc->ahb_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->mem_res == NULL || sc->ahb_mem_res == NULL) { device_printf(dev, "could not allocate resources\n"); flex_spi_detach(dev); return (ENOMEM); } /* Get clocks */ if ((clk_get_by_ofw_name(dev, node, "fspi_en", &sc->fspi_clk_en) != 0) || (clk_get_freq(sc->fspi_clk_en, &sc->fspi_clk_en_hz) != 0)) { device_printf(dev, "could not get fspi_en clock\n"); flex_spi_detach(dev); return (EINVAL); } if ((clk_get_by_ofw_name(dev, node, "fspi", &sc->fspi_clk) != 0) || (clk_get_freq(sc->fspi_clk, &sc->fspi_clk_hz) != 0)) { device_printf(dev, "could not get fspi clock\n"); flex_spi_detach(dev); return (EINVAL); } /* Enable clocks */ if (clk_enable(sc->fspi_clk_en) != 0 || clk_enable(sc->fspi_clk) != 0) { device_printf(dev, "could not enable clocks\n"); flex_spi_detach(dev); return (EINVAL); } /* Clear potential interrupts */ reg = read_reg(sc, FSPI_INTR); if (reg) write_reg(sc, FSPI_INTR, reg); /* Default setup */ if (flex_spi_default_setup(sc) != 0) { device_printf(sc->dev, "Unable to initialize defaults\n"); flex_spi_detach(dev); return (ENXIO); } /* Identify attached Flash */ if(flex_spi_identify(sc) != 0) { device_printf(sc->dev, "Unable to identify Flash\n"); flex_spi_detach(dev); return (ENXIO); } if (flex_spi_clk_setup(sc, sc->fspi_max_clk) != 0) { device_printf(sc->dev, "Unable to set up SPI max clock\n"); flex_spi_detach(dev); return (ENXIO); } sc->buf = malloc(sc->erasesize, SECTOR_BUFFER, M_WAITOK); if (sc->buf == NULL) { device_printf(sc->dev, "Unable to set up allocate internal buffer\n"); flex_spi_detach(dev); return (ENOMEM); } /* Move it to per-flash */ sc->disk = disk_alloc(); sc->disk->d_open = flex_spi_open; sc->disk->d_close = flex_spi_close; sc->disk->d_strategy = flex_spi_strategy; sc->disk->d_getattr = flex_spi_getattr; sc->disk->d_ioctl = flex_spi_ioctl; 
sc->disk->d_name = "flash/qspi"; sc->disk->d_drv1 = sc; /* the most that can fit in a single spi transaction */ sc->disk->d_maxsize = DFLTPHYS; sc->disk->d_sectorsize = FLASH_SECTORSIZE; sc->disk->d_unit = device_get_unit(sc->dev); sc->disk->d_dump = NULL; sc->disk->d_mediasize = sc->sectorsize * sc->sectorcount; sc->disk->d_stripesize = sc->erasesize; bioq_init(&sc->bio_queue); sc->taskstate = TSTATE_RUNNING; kproc_create(&flex_spi_task, sc, &sc->p, 0, 0, "task: qspi flash"); disk_create(sc->disk, DISK_VERSION); return (0); } static int flex_spi_detach(device_t dev) { struct flex_spi_softc *sc; int err; sc = device_get_softc(dev); err = 0; if (!device_is_attached(dev)) goto free_resources; mtx_lock(&sc->disk_mtx); if (sc->taskstate == TSTATE_RUNNING) { sc->taskstate = TSTATE_STOPPING; wakeup(sc->disk); while (err == 0 && sc->taskstate != TSTATE_STOPPED) { err = mtx_sleep(sc->disk, &sc->disk_mtx, 0, "flex_spi", hz * 3); if (err != 0) { sc->taskstate = TSTATE_RUNNING; device_printf(sc->dev, "Failed to stop queue task\n"); } } } mtx_unlock(&sc->disk_mtx); mtx_destroy(&sc->disk_mtx); if (err == 0 && sc->taskstate == TSTATE_STOPPED) { disk_destroy(sc->disk); bioq_flush(&sc->bio_queue, NULL, ENXIO); } /* Disable hardware. */ free_resources: /* Release memory resource. 
*/ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); if (sc->ahb_mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->ahb_mem_res), sc->ahb_mem_res); /* Disable clocks */ if (sc->fspi_clk_en_hz) clk_disable(sc->fspi_clk_en); if (sc->fspi_clk_hz) clk_disable(sc->fspi_clk); free(sc->buf, SECTOR_BUFFER); return (err); } static int flex_spi_open(struct disk *dp) { return (0); } static int flex_spi_close(struct disk *dp) { return (0); } static int flex_spi_ioctl(struct disk *dp, u_long cmd, void *data, int fflag, struct thread *td) { return (ENOTSUP); } static void flex_spi_strategy(struct bio *bp) { struct flex_spi_softc *sc; sc = (struct flex_spi_softc *)bp->bio_disk->d_drv1; mtx_lock(&sc->disk_mtx); bioq_disksort(&sc->bio_queue, bp); mtx_unlock(&sc->disk_mtx); wakeup(sc->disk); } static int flex_spi_getattr(struct bio *bp) { struct flex_spi_softc *sc; device_t dev; if (bp->bio_disk == NULL || bp->bio_disk->d_drv1 == NULL) { return (ENXIO); } sc = bp->bio_disk->d_drv1; dev = sc->dev; if (strcmp(bp->bio_attribute, "SPI::device") != 0) { return (-1); } if (bp->bio_length != sizeof(dev)) { return (EFAULT); } bcopy(&dev, bp->bio_data, sizeof(dev)); return (0); } static void flex_spi_task(void *arg) { struct flex_spi_softc *sc; struct bio *bp; sc = (struct flex_spi_softc *)arg; for (;;) { mtx_lock(&sc->disk_mtx); do { if (sc->taskstate == TSTATE_STOPPING) { sc->taskstate = TSTATE_STOPPED; mtx_unlock(&sc->disk_mtx); wakeup(sc->disk); kproc_exit(0); } bp = bioq_first(&sc->bio_queue); if (bp == NULL) mtx_sleep(sc->disk, &sc->disk_mtx, PRIBIO, "flex_spi", 0); } while (bp == NULL); bioq_remove(&sc->bio_queue, bp); mtx_unlock(&sc->disk_mtx); switch (bp->bio_cmd) { case BIO_READ: bp->bio_error = flex_spi_read(sc, bp->bio_offset, bp->bio_data, bp->bio_bcount); break; case BIO_WRITE: bp->bio_error = flex_spi_write(sc, bp->bio_offset, bp->bio_data, bp->bio_bcount); break; default: bp->bio_error = 
EINVAL; } biodone(bp); } } static device_method_t flex_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, flex_spi_probe), DEVMETHOD(device_attach, flex_spi_attach), DEVMETHOD(device_detach, flex_spi_detach), { 0, 0 } }; static driver_t flex_spi_driver = { "flex_spi", flex_spi_methods, sizeof(struct flex_spi_softc), }; DRIVER_MODULE(flex_spi, simplebus, flex_spi_driver, 0, 0); SIMPLEBUS_PNP_INFO(flex_spi_compat_data); diff --git a/sys/dev/hdmi/dwc_hdmi_fdt.c b/sys/dev/hdmi/dwc_hdmi_fdt.c index d8cd1a3cba8f..927a6a15427d 100644 --- a/sys/dev/hdmi/dwc_hdmi_fdt.c +++ b/sys/dev/hdmi/dwc_hdmi_fdt.c @@ -1,197 +1,197 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * Copyright (c) 2016 Jared McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * HDMI core module */ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "crtc_if.h" struct dwc_hdmi_fdt_softc { struct dwc_hdmi_softc base; clk_t clk_hdmi; clk_t clk_ahb; phandle_t i2c_xref; }; static struct ofw_compat_data compat_data[] = { { "synopsys,dwc-hdmi", 1 }, { NULL, 0 } }; static device_t dwc_hdmi_fdt_get_i2c_dev(device_t dev) { struct dwc_hdmi_fdt_softc *sc; sc = device_get_softc(dev); if (sc->i2c_xref == 0) return (NULL); return (OF_device_from_xref(sc->i2c_xref)); } static int dwc_hdmi_fdt_detach(device_t dev) { struct dwc_hdmi_fdt_softc *sc; sc = device_get_softc(dev); if (sc->clk_ahb != NULL) clk_release(sc->clk_ahb); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->base.sc_mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->base.sc_mem_rid, sc->base.sc_mem_res); return (0); } static int dwc_hdmi_fdt_attach(device_t dev) { struct dwc_hdmi_fdt_softc *sc; phandle_t node, i2c_xref; uint32_t freq; int err; sc = device_get_softc(dev); sc->base.sc_dev = dev; sc->base.sc_get_i2c_dev = dwc_hdmi_fdt_get_i2c_dev; err = 0; /* Allocate memory resources. 
*/ sc->base.sc_mem_rid = 0; sc->base.sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->base.sc_mem_rid, RF_ACTIVE); if (sc->base.sc_mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); err = ENXIO; goto out; } node = ofw_bus_get_node(dev); if (OF_getencprop(node, "ddc", &i2c_xref, sizeof(i2c_xref)) == -1) sc->i2c_xref = 0; else sc->i2c_xref = i2c_xref; if (OF_getencprop(node, "reg-shift", &sc->base.sc_reg_shift, sizeof(sc->base.sc_reg_shift)) <= 0) sc->base.sc_reg_shift = 0; if (clk_get_by_ofw_name(dev, 0, "hdmi", &sc->clk_hdmi) != 0 || clk_get_by_ofw_name(dev, 0, "ahb", &sc->clk_ahb) != 0) { device_printf(dev, "Cannot get clocks\n"); err = ENXIO; goto out; } if (OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)) > 0) { err = clk_set_freq(sc->clk_hdmi, freq, CLK_SET_ROUND_DOWN); if (err != 0) { device_printf(dev, "Cannot set HDMI clock frequency to %u Hz\n", freq); goto out; } } else device_printf(dev, "HDMI clock frequency not specified\n"); if (clk_enable(sc->clk_hdmi) != 0) { device_printf(dev, "Cannot enable HDMI clock\n"); err = ENXIO; goto out; } if (clk_enable(sc->clk_ahb) != 0) { device_printf(dev, "Cannot enable AHB clock\n"); err = ENXIO; goto out; } return (dwc_hdmi_init(dev)); out: dwc_hdmi_fdt_detach(dev); return (err); } static int dwc_hdmi_fdt_probe(device_t dev) { if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Synopsys DesignWare HDMI Controller"); return (BUS_PROBE_DEFAULT); } static device_method_t dwc_hdmi_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dwc_hdmi_fdt_probe), DEVMETHOD(device_attach, dwc_hdmi_fdt_attach), DEVMETHOD(device_detach, dwc_hdmi_fdt_detach), /* HDMI methods */ DEVMETHOD(hdmi_get_edid, dwc_hdmi_get_edid), DEVMETHOD(hdmi_set_videomode, dwc_hdmi_set_videomode), DEVMETHOD_END }; static driver_t dwc_hdmi_fdt_driver = { "dwc_hdmi", dwc_hdmi_fdt_methods, sizeof(struct dwc_hdmi_fdt_softc) }; 
DRIVER_MODULE(dwc_hdmi_fdt, simplebus, dwc_hdmi_fdt_driver, 0, 0); diff --git a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c index fff9c57184d7..61f4975c10de 100644 --- a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c +++ b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c @@ -1,706 +1,706 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019-2020 Thomas Skibo * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Cadence / Zynq i2c driver. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.12.2) July 1, 2018. Xilinx doc UG585. I2C Controller is documented * in Chapter 20. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "iicbus_if.h" #ifdef I2CDEBUG #define DPRINTF(...) do { printf(__VA_ARGS__); } while (0) #else #define DPRINTF(...) do { } while (0) #endif #if 0 #define HWTYPE_CDNS_R1P10 1 #endif #define HWTYPE_CDNS_R1P14 2 static struct ofw_compat_data compat_data[] = { #if 0 {"cdns,i2c-r1p10", HWTYPE_CDNS_R1P10}, #endif {"cdns,i2c-r1p14", HWTYPE_CDNS_R1P14}, {NULL, 0} }; struct cdnc_i2c_softc { device_t dev; device_t iicbus; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint16_t cfg_reg_shadow; uint16_t istat; clk_t ref_clk; uint32_t ref_clock_freq; uint32_t i2c_clock_freq; int hwtype; int hold; /* sysctls */ unsigned int i2c_clk_real_freq; unsigned int interrupts; unsigned int timeout_ints; }; #define I2C_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define I2C_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define I2C_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define I2C_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define I2C_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD2(sc, off) (bus_read_2((sc)->mem_res, (off))) #define WR2(sc, off, val) (bus_write_2((sc)->mem_res, (off), (val))) #define RD1(sc, off) (bus_read_1((sc)->mem_res, (off))) #define WR1(sc, off, val) (bus_write_1((sc)->mem_res, (off), (val))) /* Cadence I2C controller device registers. */ #define CDNC_I2C_CR 0x0000 /* Config register. 
*/ #define CDNC_I2C_CR_DIV_A_MASK (3 << 14) #define CDNC_I2C_CR_DIV_A_SHIFT 14 #define CDNC_I2C_CR_DIV_A(a) ((a) << 14) #define CDNC_I2C_CR_DIV_A_MAX 3 #define CDNC_I2C_CR_DIV_B_MASK (0x3f << 8) #define CDNC_I2C_CR_DIV_B_SHIFT 8 #define CDNC_I2C_CR_DIV_B(b) ((b) << 8) #define CDNC_I2C_CR_DIV_B_MAX 63 #define CDNC_I2C_CR_CLR_FIFO (1 << 6) #define CDNC_I2C_CR_SLVMON_MODE (1 << 5) #define CDNC_I2C_CR_HOLD (1 << 4) #define CDNC_I2C_CR_ACKEN (1 << 3) #define CDNC_I2C_CR_NEA (1 << 2) #define CDNC_I2C_CR_MAST (1 << 1) #define CDNC_I2C_CR_RNW (1 << 0) #define CDNC_I2C_SR 0x0004 /* Status register. */ #define CDNC_I2C_SR_BUS_ACTIVE (1 << 8) #define CDNC_I2C_SR_RX_OVF (1 << 7) #define CDNC_I2C_SR_TX_VALID (1 << 6) #define CDNC_I2C_SR_RX_VALID (1 << 5) #define CDNC_I2C_SR_RXRW (1 << 3) #define CDNC_I2C_ADDR 0x0008 /* i2c address register. */ #define CDNC_I2C_DATA 0x000C /* i2c data register. */ #define CDNC_I2C_ISR 0x0010 /* Int status register. */ #define CDNC_I2C_ISR_ARB_LOST (1 << 9) #define CDNC_I2C_ISR_RX_UNDF (1 << 7) #define CDNC_I2C_ISR_TX_OVF (1 << 6) #define CDNC_I2C_ISR_RX_OVF (1 << 5) #define CDNC_I2C_ISR_SLV_RDY (1 << 4) #define CDNC_I2C_ISR_XFER_TMOUT (1 << 3) #define CDNC_I2C_ISR_XFER_NACK (1 << 2) #define CDNC_I2C_ISR_XFER_DATA (1 << 1) #define CDNC_I2C_ISR_XFER_DONE (1 << 0) #define CDNC_I2C_ISR_ALL 0x2ff #define CDNC_I2C_TRANS_SIZE 0x0014 /* Transfer size. */ #define CDNC_I2C_PAUSE 0x0018 /* Slv Monitor Pause reg. */ #define CDNC_I2C_TIME_OUT 0x001C /* Time-out register. */ #define CDNC_I2C_TIME_OUT_MIN 31 #define CDNC_I2C_TIME_OUT_MAX 255 #define CDNC_I2C_IMR 0x0020 /* Int mask register. */ #define CDNC_I2C_IER 0x0024 /* Int enable register. */ #define CDNC_I2C_IDR 0x0028 /* Int disable register. 
*/ #define CDNC_I2C_FIFO_SIZE 16 #define CDNC_I2C_DEFAULT_I2C_CLOCK 400000 /* 400Khz default */ #define CDNC_I2C_ISR_ERRS (CDNC_I2C_ISR_ARB_LOST | CDNC_I2C_ISR_RX_UNDF | \ CDNC_I2C_ISR_TX_OVF | CDNC_I2C_ISR_RX_OVF | CDNC_I2C_ISR_XFER_TMOUT | \ CDNC_I2C_ISR_XFER_NACK) /* Configure clock dividers. */ static int cdnc_i2c_set_freq(struct cdnc_i2c_softc *sc) { uint32_t div_a, div_b, err, clk_out; uint32_t best_div_a, best_div_b, best_err; best_div_a = 0; best_div_b = 0; best_err = ~0U; /* * The i2c controller has a two-stage clock divider to create * the "clock enable" signal used to sample the incoming SCL and * SDA signals. The Clock Enable signal is divided by 22 to create * the outgoing SCL signal. * * Try all div_a values and pick best match. */ for (div_a = 0; div_a <= CDNC_I2C_CR_DIV_A_MAX; div_a++) { div_b = sc->ref_clock_freq / (22 * sc->i2c_clock_freq * (div_a + 1)); if (div_b > CDNC_I2C_CR_DIV_B_MAX) continue; clk_out = sc->ref_clock_freq / (22 * (div_a + 1) * (div_b + 1)); err = clk_out > sc->i2c_clock_freq ? clk_out - sc->i2c_clock_freq : sc->i2c_clock_freq - clk_out; if (err < best_err) { best_err = err; best_div_a = div_a; best_div_b = div_b; } } if (best_err == ~0U) { device_printf(sc->dev, "cannot configure clock divider.\n"); return (EINVAL); /* out of range */ } clk_out = sc->ref_clock_freq / (22 * (best_div_a + 1) * (best_div_b + 1)); DPRINTF("%s: ref_clock_freq=%d i2c_clock_freq=%d\n", __func__, sc->ref_clock_freq, sc->i2c_clock_freq); DPRINTF("%s: div_a=%d div_b=%d real-freq=%d\n", __func__, best_div_a, best_div_b, clk_out); sc->cfg_reg_shadow &= ~(CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK); sc->cfg_reg_shadow |= CDNC_I2C_CR_DIV_A(best_div_a) | CDNC_I2C_CR_DIV_B(best_div_b); WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->i2c_clk_real_freq = clk_out; return (0); } /* Initialize hardware. */ static int cdnc_i2c_init_hw(struct cdnc_i2c_softc *sc) { /* Reset config register and clear FIFO. 
*/ sc->cfg_reg_shadow = 0; WR2(sc, CDNC_I2C_CR, CDNC_I2C_CR_CLR_FIFO); sc->hold = 0; /* Clear and disable all interrupts. */ WR2(sc, CDNC_I2C_ISR, CDNC_I2C_ISR_ALL); WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_ALL); /* Max out bogus time-out register. */ WR1(sc, CDNC_I2C_TIME_OUT, CDNC_I2C_TIME_OUT_MAX); /* Set up clock dividers. */ return (cdnc_i2c_set_freq(sc)); } static int cdnc_i2c_errs(struct cdnc_i2c_softc *sc, uint16_t istat) { DPRINTF("%s: istat=0x%x\n", __func__, istat); /* XXX: clean up after errors. */ /* Reset config register and clear FIFO. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 0; if (istat & CDNC_I2C_ISR_XFER_TMOUT) return (IIC_ETIMEOUT); else if (istat & CDNC_I2C_ISR_RX_UNDF) return (IIC_EUNDERFLOW); else if (istat & (CDNC_I2C_ISR_RX_OVF | CDNC_I2C_ISR_TX_OVF)) return (IIC_EOVERFLOW); else if (istat & CDNC_I2C_ISR_XFER_NACK) return (IIC_ENOACK); else if (istat & CDNC_I2C_ISR_ARB_LOST) return (IIC_EBUSERR); /* XXX: ???? */ else /* Should not happen */ return (IIC_NOERR); } static int cdnc_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct cdnc_i2c_softc *sc = device_get_softc(dev); int error; DPRINTF("%s: speed=%d addr=0x%x\n", __func__, speed, addr); I2C_SC_LOCK(sc); sc->i2c_clock_freq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); error = cdnc_i2c_init_hw(sc); I2C_SC_UNLOCK(sc); return (error ? IIC_ENOTSUPP : IIC_NOERR); } static void cdnc_i2c_intr(void *arg) { struct cdnc_i2c_softc *sc = (struct cdnc_i2c_softc *)arg; uint16_t status; I2C_SC_LOCK(sc); sc->interrupts++; /* Read active interrupts. */ status = RD2(sc, CDNC_I2C_ISR) & ~RD2(sc, CDNC_I2C_IMR); /* Clear interrupts. 
*/ WR2(sc, CDNC_I2C_ISR, status); if (status & CDNC_I2C_ISR_XFER_TMOUT) sc->timeout_ints++; sc->istat |= status; if (status) wakeup(sc); I2C_SC_UNLOCK(sc); } static int cdnc_i2c_xfer_rd(struct cdnc_i2c_softc *sc, struct iic_msg *msg) { int error = IIC_NOERR; uint16_t flags = msg->flags; uint16_t len = msg->len; int idx = 0, nbytes, last, first = 1; uint16_t statr; DPRINTF("%s: flags=0x%x len=%d\n", __func__, flags, len); #if 0 if (sc->hwtype == HWTYPE_CDNS_R1P10 && (flags & IIC_M_NOSTOP)) return (IIC_ENOTSUPP); #endif I2C_SC_ASSERT_LOCKED(sc); /* Program config register. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; sc->cfg_reg_shadow |= CDNC_I2C_CR_HOLD | CDNC_I2C_CR_ACKEN | CDNC_I2C_CR_NEA | CDNC_I2C_CR_MAST | CDNC_I2C_CR_RNW; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 1; while (len > 0) { nbytes = MIN(CDNC_I2C_FIFO_SIZE - 2, len); WR1(sc, CDNC_I2C_TRANS_SIZE, nbytes); last = nbytes == len && !(flags & IIC_M_NOSTOP); if (last) { /* Clear HOLD bit on last transfer. */ sc->cfg_reg_shadow &= ~CDNC_I2C_CR_HOLD; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->hold = 0; } /* Writing slv address for a start or repeated start. */ if (first && !(flags & IIC_M_NOSTART)) WR2(sc, CDNC_I2C_ADDR, msg->slave >> 1); first = 0; /* Enable FIFO interrupts and wait. */ if (last) WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); else WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DATA | CDNC_I2C_ISR_ERRS); error = mtx_sleep(sc, &sc->sc_mtx, 0, "cdi2c", hz); /* Disable FIFO interrupts. */ WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_XFER_DATA | CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); if (error == EWOULDBLOCK) error = cdnc_i2c_errs(sc, CDNC_I2C_ISR_XFER_TMOUT); else if (sc->istat & CDNC_I2C_ISR_ERRS) error = cdnc_i2c_errs(sc, sc->istat); sc->istat = 0; if (error != IIC_NOERR) break; /* Read nbytes from FIFO. 
*/ while (nbytes-- > 0) { statr = RD2(sc, CDNC_I2C_SR); if (!(statr & CDNC_I2C_SR_RX_VALID)) { printf("%s: RX FIFO underflow?\n", __func__); break; } msg->buf[idx++] = RD2(sc, CDNC_I2C_DATA); len--; } } return (error); } static int cdnc_i2c_xfer_wr(struct cdnc_i2c_softc *sc, struct iic_msg *msg) { int error = IIC_NOERR; uint16_t flags = msg->flags; uint16_t len = msg->len; int idx = 0, nbytes, last, first = 1; DPRINTF("%s: flags=0x%x len=%d\n", __func__, flags, len); I2C_SC_ASSERT_LOCKED(sc); /* Program config register. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; sc->cfg_reg_shadow |= CDNC_I2C_CR_HOLD | CDNC_I2C_CR_ACKEN | CDNC_I2C_CR_NEA | CDNC_I2C_CR_MAST; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 1; while (len > 0) { /* Put as much data into fifo as you can. */ nbytes = MIN(len, CDNC_I2C_FIFO_SIZE - RD1(sc, CDNC_I2C_TRANS_SIZE) - 1); len -= nbytes; while (nbytes-- > 0) WR2(sc, CDNC_I2C_DATA, msg->buf[idx++]); last = len == 0 && !(flags & IIC_M_NOSTOP); if (last) { /* Clear HOLD bit on last transfer. */ sc->cfg_reg_shadow &= ~CDNC_I2C_CR_HOLD; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->hold = 0; } /* Perform START if this is start or repeated start. */ if (first && !(flags & IIC_M_NOSTART)) WR2(sc, CDNC_I2C_ADDR, msg->slave >> 1); first = 0; /* Enable FIFO interrupts. */ WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); /* Wait for end of data transfer. */ error = mtx_sleep(sc, &sc->sc_mtx, 0, "cdi2c", hz); /* Disable FIFO interrupts. 
*/ WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); if (error == EWOULDBLOCK) error = cdnc_i2c_errs(sc, CDNC_I2C_ISR_XFER_TMOUT); else if (sc->istat & CDNC_I2C_ISR_ERRS) error = cdnc_i2c_errs(sc, sc->istat); sc->istat = 0; if (error) break; } return (error); } static int cdnc_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct cdnc_i2c_softc *sc = device_get_softc(dev); int i, error = IIC_NOERR; DPRINTF("%s: nmsgs=%d\n", __func__, nmsgs); I2C_SC_LOCK(sc); for (i = 0; i < nmsgs; i++) { DPRINTF("%s: msg[%d]: hold=%d slv=0x%x flags=0x%x len=%d\n", __func__, i, sc->hold, msgs[i].slave, msgs[i].flags, msgs[i].len); if (!sc->hold && (msgs[i].flags & IIC_M_NOSTART)) return (IIC_ENOTSUPP); if (msgs[i].flags & IIC_M_RD) { error = cdnc_i2c_xfer_rd(sc, &msgs[i]); if (error != IIC_NOERR) break; } else { error = cdnc_i2c_xfer_wr(sc, &msgs[i]); if (error != IIC_NOERR) break; } } I2C_SC_UNLOCK(sc); return (error); } static void cdnc_i2c_add_sysctls(device_t dev) { struct cdnc_i2c_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "i2c_clk_real_freq", CTLFLAG_RD, &sc->i2c_clk_real_freq, 0, "i2c clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_timeouts", CTLFLAG_RD, &sc->timeout_ints, 0, "hardware timeout interrupts"); } static int cdnc_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Cadence I2C Controller"); return (BUS_PROBE_DEFAULT); } static int cdnc_i2c_detach(device_t); static int cdnc_i2c_attach(device_t dev) { struct cdnc_i2c_softc *sc; int rid, err; phandle_t node; pcell_t cell; uint64_t freq; sc = device_get_softc(dev); 
sc->dev = dev; sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; I2C_SC_LOCK_INIT(sc); /* Get ref-clock and i2c-clock properties. */ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock_freq = fdt32_to_cpu(cell); else if (clk_get_by_ofw_index(dev, node, 0, &sc->ref_clk) == 0) { if ((err = clk_enable(sc->ref_clk)) != 0) device_printf(dev, "Cannot enable clock. err=%d\n", err); else if ((err = clk_get_freq(sc->ref_clk, &freq)) != 0) device_printf(dev, "Cannot get clock frequency. err=%d\n", err); else sc->ref_clock_freq = freq; } else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "clock-frequency", &cell, sizeof(cell)) > 0) sc->i2c_clock_freq = fdt32_to_cpu(cell); else sc->i2c_clock_freq = CDNC_I2C_DEFAULT_I2C_CLOCK; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); cdnc_i2c_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); cdnc_i2c_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, cdnc_i2c_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); cdnc_i2c_detach(dev); return (err); } /* Configure the device. */ err = cdnc_i2c_init_hw(sc); if (err) { cdnc_i2c_detach(dev); return (err); } sc->iicbus = device_add_child(dev, "iicbus", -1); cdnc_i2c_add_sysctls(dev); /* Probe and attach iicbus when interrupts work. 
*/ return (bus_delayed_attach_children(dev)); } static int cdnc_i2c_detach(device_t dev) { struct cdnc_i2c_softc *sc = device_get_softc(dev); if (device_is_attached(dev)) bus_generic_detach(dev); if (sc->ref_clk != NULL) { clk_release(sc->ref_clk); sc->ref_clk = NULL; } /* Delete iic bus. */ if (sc->iicbus) device_delete_child(dev, sc->iicbus); /* Disable hardware. */ if (sc->mem_res != NULL) { sc->cfg_reg_shadow = 0; WR2(sc, CDNC_I2C_CR, CDNC_I2C_CR_CLR_FIFO); /* Clear and disable all interrupts. */ WR2(sc, CDNC_I2C_ISR, CDNC_I2C_ISR_ALL); WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_ALL); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sc->irq_res = NULL; } /* Release memory resource. */ if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; } I2C_SC_LOCK_DESTROY(sc); return (0); } static phandle_t cdnc_i2c_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static device_method_t cdnc_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cdnc_i2c_probe), DEVMETHOD(device_attach, cdnc_i2c_attach), DEVMETHOD(device_detach, cdnc_i2c_detach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, cdnc_i2c_get_node), /* iicbus methods */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, cdnc_i2c_reset), DEVMETHOD(iicbus_transfer, cdnc_i2c_transfer), DEVMETHOD_END }; static driver_t cdnc_i2c_driver = { "cdnc_i2c", cdnc_i2c_methods, sizeof(struct cdnc_i2c_softc), }; DRIVER_MODULE(cdnc_i2c, simplebus, cdnc_i2c_driver, NULL, NULL); DRIVER_MODULE(ofw_iicbus, cdnc_i2c, ofw_iicbus_driver, NULL, NULL); MODULE_DEPEND(cdnc_i2c, iicbus, 1, 1, 1); MODULE_DEPEND(cdnc_i2c, ofw_iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/iicbus/controller/opencores/iicoc_fdt.c 
b/sys/dev/iicbus/controller/opencores/iicoc_fdt.c index 40994c22628b..649027038659 100644 --- a/sys/dev/iicbus/controller/opencores/iicoc_fdt.c +++ b/sys/dev/iicbus/controller/opencores/iicoc_fdt.c @@ -1,189 +1,189 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Axiado Corporation. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Axiado Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include "iicbus_if.h" #include "iicoc.h" static struct ofw_compat_data compat_data[] = { { "opencores,i2c-ocores", 1 }, { "sifive,fu740-c000-i2c", 1 }, { "sifive,fu540-c000-i2c", 1 }, { "sifive,i2c0", 1 }, { NULL, 0 } }; static struct resource_spec iicoc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, RESOURCE_SPEC_END }; static phandle_t iicoc_get_node(device_t bus, device_t dev) { /* Share controller node with iicbus device. */ return (ofw_bus_get_node(bus)); } static int iicoc_attach(device_t dev) { struct iicoc_softc *sc; phandle_t node; clk_t clock; uint64_t clockfreq; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->sc_mtx, "iicoc", "iicoc", MTX_DEF); error = bus_alloc_resources(dev, iicoc_spec, &sc->mem_res); if (error) { device_printf(dev, "Could not allocate bus resource.\n"); goto fail; } node = ofw_bus_get_node(dev); sc->reg_shift = 0; OF_getencprop(node, "reg-shift", &sc->reg_shift, sizeof(sc->reg_shift)); error = clk_get_by_ofw_index(dev, 0, 0, &clock); if (error) { device_printf(dev, "Couldn't get clock\n"); goto fail; } error = clk_enable(clock); if (error) { device_printf(dev, "Couldn't enable clock\n"); goto fail1; } error = clk_get_freq(clock, &clockfreq); if (error) { device_printf(dev, "Couldn't get clock frequency\n"); goto fail1; } if (clockfreq > UINT_MAX) { device_printf(dev, "Unsupported clock frequency\n"); goto fail1; } sc->clockfreq = (u_int)clockfreq; sc->i2cfreq = XLP_I2C_FREQ; iicoc_init(dev); sc->iicbus = device_add_child(dev, "iicbus", -1); if (sc->iicbus == NULL) { device_printf(dev, "Could not allocate iicbus instance.\n"); error = ENXIO; goto fail1; } /* Probe and attach the iicbus when interrupts are available. 
*/ config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); fail1: clk_disable(clock); fail: bus_release_resources(dev, iicoc_spec, &sc->mem_res); mtx_destroy(&sc->sc_mtx); return (error); } static int iicoc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "OpenCores I2C master controller"); return (BUS_PROBE_DEFAULT); } static device_method_t iicoc_methods[] = { /* device interface */ DEVMETHOD(device_probe, iicoc_probe), DEVMETHOD(device_attach, iicoc_attach), /* ofw interface */ DEVMETHOD(ofw_bus_get_node, iicoc_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, iicoc_iicbus_repeated_start), DEVMETHOD(iicbus_start, iicoc_iicbus_start), DEVMETHOD(iicbus_stop, iicoc_iicbus_stop), DEVMETHOD(iicbus_reset, iicoc_iicbus_reset), DEVMETHOD(iicbus_write, iicoc_iicbus_write), DEVMETHOD(iicbus_read, iicoc_iicbus_read), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), DEVMETHOD_END }; static driver_t iicoc_driver = { "iicoc", iicoc_methods, sizeof(struct iicoc_softc), }; SIMPLEBUS_PNP_INFO(compat_data); DRIVER_MODULE(iicoc, simplebus, iicoc_driver, 0, 0); DRIVER_MODULE(ofw_iicbus, iicoc, ofw_iicbus_driver, 0, 0); MODULE_DEPEND(iicoc, iicbus, 1, 1, 1); MODULE_DEPEND(iicoc, ofw_iicbus, 1, 1, 1); diff --git a/sys/dev/iicbus/controller/rockchip/rk_i2c.c b/sys/dev/iicbus/controller/rockchip/rk_i2c.c index 0961dbc96a92..4a431649de49 100644 --- a/sys/dev/iicbus/controller/rockchip/rk_i2c.c +++ b/sys/dev/iicbus/controller/rockchip/rk_i2c.c @@ -1,731 +1,731 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include "iicbus_if.h" #define RK_I2C_CON 0x00 #define RK_I2C_CON_EN (1 << 0) #define RK_I2C_CON_MODE_SHIFT 1 #define RK_I2C_CON_MODE_TX 0 #define RK_I2C_CON_MODE_RRX 1 #define RK_I2C_CON_MODE_RX 2 #define RK_I2C_CON_MODE_RTX 3 #define RK_I2C_CON_MODE_MASK 0x6 #define RK_I2C_CON_START (1 << 3) #define RK_I2C_CON_STOP (1 << 4) #define RK_I2C_CON_LASTACK (1 << 5) #define RK_I2C_CON_NAKSTOP (1 << 6) #define RK_I2C_CON_CTRL_MASK 0xFF #define RK_I2C_CLKDIV 0x04 #define RK_I2C_CLKDIVL_MASK 0xFFFF #define RK_I2C_CLKDIVL_SHIFT 0 #define RK_I2C_CLKDIVH_MASK 0xFFFF0000 #define RK_I2C_CLKDIVH_SHIFT 16 #define RK_I2C_CLKDIV_MUL 8 #define RK_I2C_MRXADDR 0x08 #define RK_I2C_MRXADDR_SADDR_MASK 0xFFFFFF #define RK_I2C_MRXADDR_VALID(x) (1 << (24 + x)) #define RK_I2C_MRXRADDR 0x0C #define RK_I2C_MRXRADDR_SRADDR_MASK 0xFFFFFF #define RK_I2C_MRXRADDR_VALID(x) (1 << (24 + x)) #define RK_I2C_MTXCNT 0x10 #define RK_I2C_MTXCNT_MASK 0x3F #define RK_I2C_MRXCNT 0x14 #define RK_I2C_MRXCNT_MASK 0x3F #define RK_I2C_IEN 0x18 #define RK_I2C_IEN_BTFIEN (1 << 0) #define RK_I2C_IEN_BRFIEN (1 << 1) #define RK_I2C_IEN_MBTFIEN (1 << 2) #define RK_I2C_IEN_MBRFIEN (1 << 3) #define RK_I2C_IEN_STARTIEN (1 << 4) #define RK_I2C_IEN_STOPIEN (1 << 5) #define RK_I2C_IEN_NAKRCVIEN (1 << 6) #define RK_I2C_IEN_ALL (RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_MBRFIEN | \ RK_I2C_IEN_STARTIEN | RK_I2C_IEN_STOPIEN | RK_I2C_IEN_NAKRCVIEN) #define RK_I2C_IPD 0x1C #define RK_I2C_IPD_BTFIPD (1 << 0) #define RK_I2C_IPD_BRFIPD (1 << 1) #define RK_I2C_IPD_MBTFIPD (1 << 2) #define RK_I2C_IPD_MBRFIPD (1 << 3) #define RK_I2C_IPD_STARTIPD (1 << 4) #define RK_I2C_IPD_STOPIPD (1 << 5) #define RK_I2C_IPD_NAKRCVIPD (1 << 6) #define RK_I2C_IPD_ALL (RK_I2C_IPD_MBTFIPD | RK_I2C_IPD_MBRFIPD | \ RK_I2C_IPD_STARTIPD | RK_I2C_IPD_STOPIPD | RK_I2C_IPD_NAKRCVIPD) #define RK_I2C_FNCT 0x20 #define RK_I2C_FNCT_MASK 0x3F #define 
RK_I2C_TXDATA_BASE 0x100 #define RK_I2C_RXDATA_BASE 0x200 /* 8 data registers, 4 bytes each. */ #define RK_I2C_MAX_RXTX_LEN 32 enum rk_i2c_state { STATE_IDLE = 0, STATE_START, STATE_READ, STATE_WRITE, STATE_STOP }; struct rk_i2c_softc { device_t dev; struct resource *res[2]; struct mtx mtx; clk_t sclk; clk_t pclk; int busy; void * intrhand; uint32_t intr; uint32_t ipd; struct iic_msg *msg; size_t cnt; bool transfer_done; bool nak_recv; bool tx_slave_addr; uint8_t mode; uint8_t state; device_t iicbus; }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-i2c", 1}, {"rockchip,rk3328-i2c", 1}, {"rockchip,rk3399-i2c", 1}, {NULL, 0} }; static struct resource_spec rk_i2c_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static int rk_i2c_probe(device_t dev); static int rk_i2c_attach(device_t dev); static int rk_i2c_detach(device_t dev); #define RK_I2C_LOCK(sc) mtx_lock(&(sc)->mtx) #define RK_I2C_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RK_I2C_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define RK_I2C_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define RK_I2C_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static uint32_t rk_i2c_get_clkdiv(struct rk_i2c_softc *sc, uint32_t speed) { uint64_t sclk_freq; uint32_t clkdiv; int err; err = clk_get_freq(sc->sclk, &sclk_freq); if (err != 0) return (err); clkdiv = (sclk_freq / speed / RK_I2C_CLKDIV_MUL / 2) - 1; clkdiv &= RK_I2C_CLKDIVL_MASK; clkdiv = clkdiv << RK_I2C_CLKDIVH_SHIFT | clkdiv; return (clkdiv); } static int rk_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct rk_i2c_softc *sc; uint32_t clkdiv; u_int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); clkdiv = rk_i2c_get_clkdiv(sc, busfreq); RK_I2C_LOCK(sc); /* Set the clock divider */ RK_I2C_WRITE(sc, RK_I2C_CLKDIV, clkdiv); /* Disable the module */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_UNLOCK(sc); return (0); } 
static uint8_t rk_i2c_fill_tx(struct rk_i2c_softc *sc) { uint32_t buf32; uint8_t buf; int i, j, len; len = sc->msg->len - sc->cnt; if (sc->tx_slave_addr) { KASSERT(sc->cnt == 0, ("tx_slave_addr in the middle of data")); len++; } if (len > RK_I2C_MAX_RXTX_LEN) len = RK_I2C_MAX_RXTX_LEN; for (i = 0; i < len; ) { buf32 = 0; /* Process next 4 bytes or whatever remains. */ for (j = 0; j < MIN(len - i, 4); j++) { /* Fill the addr if needed */ if (sc->tx_slave_addr) { buf = sc->msg->slave; sc->tx_slave_addr = false; } else { KASSERT(sc->cnt < sc->msg->len, ("%s: data buffer overrun", __func__)); buf = sc->msg->buf[sc->cnt]; sc->cnt++; } buf32 |= (uint32_t)buf << (j * 8); } KASSERT(i % 4 == 0, ("%s: misaligned write offset", __func__)); RK_I2C_WRITE(sc, RK_I2C_TXDATA_BASE + i, buf32); i += j; } return (len); } static void rk_i2c_drain_rx(struct rk_i2c_softc *sc) { uint32_t buf32 = 0; uint8_t buf8; int len; int i; if (sc->msg == NULL) { device_printf(sc->dev, "No current iic msg\n"); return; } len = sc->msg->len - sc->cnt; if (len > RK_I2C_MAX_RXTX_LEN) len = RK_I2C_MAX_RXTX_LEN; for (i = 0; i < len; i++) { if (i % 4 == 0) buf32 = RK_I2C_READ(sc, RK_I2C_RXDATA_BASE + i); buf8 = (buf32 >> ((i % 4) * 8)) & 0xFF; sc->msg->buf[sc->cnt++] = buf8; } } static void rk_i2c_send_stop(struct rk_i2c_softc *sc) { uint32_t reg; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STOPIEN); sc->state = STATE_STOP; reg = RK_I2C_READ(sc, RK_I2C_CON); reg |= RK_I2C_CON_STOP; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } static void rk_i2c_intr_locked(struct rk_i2c_softc *sc) { uint32_t reg; int transfer_len; sc->ipd = RK_I2C_READ(sc, RK_I2C_IPD); /* Something to handle? */ if ((sc->ipd & RK_I2C_IPD_ALL) == 0) return; RK_I2C_WRITE(sc, RK_I2C_IPD, sc->ipd); sc->ipd &= RK_I2C_IPD_ALL; if (sc->ipd & RK_I2C_IPD_NAKRCVIPD) { /* NACK received */ sc->ipd &= ~RK_I2C_IPD_NAKRCVIPD; sc->nak_recv = true; /* XXXX last byte !!!, signal error !!! 
*/ sc->transfer_done = true; sc->state = STATE_IDLE; goto err; } switch (sc->state) { case STATE_START: /* Disable start bit */ reg = RK_I2C_READ(sc, RK_I2C_CON); reg &= ~RK_I2C_CON_START; RK_I2C_WRITE(sc, RK_I2C_CON, reg); if (sc->mode == RK_I2C_CON_MODE_RRX || sc->mode == RK_I2C_CON_MODE_RX) { sc->state = STATE_READ; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN | RK_I2C_IEN_NAKRCVIEN); if ((sc->msg->len - sc->cnt) > 32) transfer_len = 32; else { transfer_len = sc->msg->len - sc->cnt; reg = RK_I2C_READ(sc, RK_I2C_CON); reg |= RK_I2C_CON_LASTACK; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } RK_I2C_WRITE(sc, RK_I2C_MRXCNT, transfer_len); } else { sc->state = STATE_WRITE; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); transfer_len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, transfer_len); } break; case STATE_READ: rk_i2c_drain_rx(sc); if (sc->cnt == sc->msg->len) rk_i2c_send_stop(sc); else { sc->mode = RK_I2C_CON_MODE_RX; reg = RK_I2C_READ(sc, RK_I2C_CON) & \ ~RK_I2C_CON_CTRL_MASK; reg |= sc->mode << RK_I2C_CON_MODE_SHIFT; reg |= RK_I2C_CON_EN; if ((sc->msg->len - sc->cnt) > 32) transfer_len = 32; else { transfer_len = sc->msg->len - sc->cnt; reg |= RK_I2C_CON_LASTACK; } RK_I2C_WRITE(sc, RK_I2C_CON, reg); RK_I2C_WRITE(sc, RK_I2C_MRXCNT, transfer_len); } break; case STATE_WRITE: if (sc->cnt < sc->msg->len) { /* Keep writing. 
*/ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); transfer_len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, transfer_len); break; } else if (!(sc->msg->flags & IIC_M_NOSTOP)) { rk_i2c_send_stop(sc); break; } /* passthru */ case STATE_STOP: /* Disable stop bit */ reg = RK_I2C_READ(sc, RK_I2C_CON); reg &= ~RK_I2C_CON_STOP; RK_I2C_WRITE(sc, RK_I2C_CON, reg); sc->transfer_done = 1; sc->state = STATE_IDLE; break; case STATE_IDLE: break; } err: wakeup(sc); } static void rk_i2c_intr(void *arg) { struct rk_i2c_softc *sc; sc = (struct rk_i2c_softc *)arg; RK_I2C_LOCK(sc); rk_i2c_intr_locked(sc); RK_I2C_UNLOCK(sc); } static void rk_i2c_start_xfer(struct rk_i2c_softc *sc, struct iic_msg *msg, boolean_t last) { uint32_t reg; uint8_t len; sc->transfer_done = false; sc->nak_recv = false; sc->tx_slave_addr = false; sc->cnt = 0; sc->state = STATE_IDLE; sc->msg = msg; reg = RK_I2C_READ(sc, RK_I2C_CON) & ~RK_I2C_CON_CTRL_MASK; if (!(sc->msg->flags & IIC_M_NOSTART)) { /* Stadard message */ if (sc->mode == RK_I2C_CON_MODE_TX) { sc->tx_slave_addr = true; } sc->state = STATE_START; reg |= RK_I2C_CON_START; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STARTIEN); } else { /* Continuation message */ if (sc->mode == RK_I2C_CON_MODE_RX) { sc->state = STATE_READ; if (last) reg |= RK_I2C_CON_LASTACK; RK_I2C_WRITE(sc, RK_I2C_MRXCNT, sc->msg->len); RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN | RK_I2C_IEN_NAKRCVIEN); } else { sc->state = STATE_WRITE; len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, len); RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); } } reg |= RK_I2C_CON_NAKSTOP; reg |= sc->mode << RK_I2C_CON_MODE_SHIFT; reg |= RK_I2C_CON_EN; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } static int rk_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct rk_i2c_softc *sc; uint32_t reg; bool last_msg; int i, j, timeout, err; sc = device_get_softc(dev); RK_I2C_LOCK(sc); while (sc->busy) mtx_sleep(sc, &sc->mtx, 0, 
"i2cbuswait", 0); sc->busy = 1; /* Disable the module and interrupts */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_WRITE(sc, RK_I2C_IEN, 0); /* Clean stale interrupts */ RK_I2C_WRITE(sc, RK_I2C_IPD, RK_I2C_IPD_ALL); err = 0; for (i = 0; i < nmsgs; i++) { /* Validate parameters. */ if (msgs == NULL || msgs[i].buf == NULL || msgs[i].len == 0) { err = IIC_ENOTSUPP; break; } /* * If next message have NOSTART flag, then they both * should be same type (read/write) and same address. */ if (i < nmsgs - 1) { if ((msgs[i + 1].flags & IIC_M_NOSTART) && ((msgs[i].flags & IIC_M_RD) != (msgs[i + 1].flags & IIC_M_RD) || (msgs[i].slave != msgs[i + 1].slave))) { err = IIC_ENOTSUPP; break; } } /* * Detect simple register read case. * The first message should be IIC_M_WR | IIC_M_NOSTOP, * next pure IIC_M_RD (no other flags allowed). Both * messages should have same slave address. */ if (nmsgs - i >= 2 && msgs[i].len < 4 && msgs[i].flags == (IIC_M_WR | IIC_M_NOSTOP) && msgs[i + 1].flags == IIC_M_RD && (msgs[i].slave & ~LSB) == (msgs[i + 1].slave & ~LSB)) { sc->mode = RK_I2C_CON_MODE_RRX; /* Write slave address */ reg = msgs[i].slave & ~LSB; reg |= RK_I2C_MRXADDR_VALID(0); RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg); /* Write slave register address */ reg = 0; for (j = 0; j < msgs[i].len ; j++) { reg |= (uint32_t)msgs[i].buf[j] << (j * 8); reg |= RK_I2C_MRXADDR_VALID(j); } RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, reg); i++; } else { if (msgs[i].flags & IIC_M_RD) { if (msgs[i].flags & IIC_M_NOSTART) { sc->mode = RK_I2C_CON_MODE_RX; } else { sc->mode = RK_I2C_CON_MODE_RRX; reg = msgs[i].slave & ~LSB; reg |= RK_I2C_MRXADDR_VALID(0); RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg); RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, 0); } } else { sc->mode = RK_I2C_CON_MODE_TX; } } /* last message ? 
*/ last_msg = (i >= nmsgs - 1) || !(msgs[i + 1].flags & IIC_M_NOSTART); rk_i2c_start_xfer(sc, msgs + i, last_msg); if (cold) { for(timeout = 10000; timeout > 0; timeout--) { rk_i2c_intr_locked(sc); if (sc->transfer_done) break; DELAY(1000); } if (timeout <= 0) err = IIC_ETIMEOUT; } else { while (err == 0 && !sc->transfer_done) { err = msleep(sc, &sc->mtx, PZERO, "rk_i2c", 10 * hz); } } } /* Disable the module and interrupts */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_WRITE(sc, RK_I2C_IEN, 0); sc->busy = 0; if (sc->nak_recv) err = IIC_ENOACK; RK_I2C_UNLOCK(sc); return (err); } static int rk_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip I2C"); return (BUS_PROBE_DEFAULT); } static int rk_i2c_attach(device_t dev) { struct rk_i2c_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), "rk_i2c", MTX_DEF); if (bus_alloc_resources(dev, rk_i2c_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_i2c_intr, sc, &sc->intrhand)) { bus_release_resources(dev, rk_i2c_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } clk_set_assigned(dev, ofw_bus_get_node(dev)); /* Activate the module clocks. */ error = clk_get_by_ofw_name(dev, 0, "i2c", &sc->sclk); if (error != 0) { device_printf(dev, "cannot get i2c clock\n"); goto fail; } error = clk_enable(sc->sclk); if (error != 0) { device_printf(dev, "cannot enable i2c clock\n"); goto fail; } /* pclk clock is optional. 
*/ error = clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk); if (error != 0 && error != ENOENT) { device_printf(dev, "cannot get pclk clock\n"); goto fail; } if (sc->pclk != NULL) { error = clk_enable(sc->pclk); if (error != 0) { device_printf(dev, "cannot enable pclk clock\n"); goto fail; } } sc->iicbus = device_add_child(dev, "iicbus", -1); if (sc->iicbus == NULL) { device_printf(dev, "cannot add iicbus child device\n"); error = ENXIO; goto fail; } bus_generic_attach(dev); return (0); fail: if (rk_i2c_detach(dev) != 0) device_printf(dev, "Failed to detach\n"); return (error); } static int rk_i2c_detach(device_t dev) { struct rk_i2c_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev)) != 0) return (error); if (sc->iicbus != NULL) if ((error = device_delete_child(dev, sc->iicbus)) != 0) return (error); if (sc->sclk != NULL) clk_release(sc->sclk); if (sc->pclk != NULL) clk_release(sc->pclk); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, rk_i2c_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static phandle_t rk_i2c_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static device_method_t rk_i2c_methods[] = { DEVMETHOD(device_probe, rk_i2c_probe), DEVMETHOD(device_attach, rk_i2c_attach), DEVMETHOD(device_detach, rk_i2c_detach), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, rk_i2c_get_node), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, rk_i2c_reset), DEVMETHOD(iicbus_transfer, rk_i2c_transfer), DEVMETHOD_END }; static driver_t rk_i2c_driver = { "rk_i2c", rk_i2c_methods, sizeof(struct rk_i2c_softc), }; EARLY_DRIVER_MODULE(rk_i2c, simplebus, rk_i2c_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); EARLY_DRIVER_MODULE(ofw_iicbus, rk_i2c, ofw_iicbus_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); MODULE_DEPEND(rk_i2c, iicbus, 1, 1, 1); MODULE_VERSION(rk_i2c, 1); diff --git 
a/sys/dev/iicbus/controller/twsi/a10_twsi.c b/sys/dev/iicbus/controller/twsi/a10_twsi.c index e66f941d3814..40d75f64e3e1 100644 --- a/sys/dev/iicbus/controller/twsi/a10_twsi.c +++ b/sys/dev/iicbus/controller/twsi/a10_twsi.c @@ -1,152 +1,152 @@ /*- * Copyright (c) 2016-2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "iicbus_if.h" #define TWI_ADDR 0x0 #define TWI_XADDR 0x4 #define TWI_DATA 0x8 #define TWI_CNTR 0xC #define TWI_STAT 0x10 #define TWI_CCR 0x14 #define TWI_SRST 0x18 #define TWI_EFR 0x1C #define TWI_LCR 0x20 static struct ofw_compat_data compat_data[] = { {"allwinner,sun4i-a10-i2c", 1}, {"allwinner,sun6i-a31-i2c", 1}, {"allwinner,sun8i-a83t-i2c", 1}, {NULL, 0}, }; static int a10_twsi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Integrated I2C Bus Controller"); return (BUS_PROBE_DEFAULT); } static int a10_twsi_attach(device_t dev) { struct twsi_softc *sc; hwreset_t rst; int error; sc = device_get_softc(dev); /* De-assert reset */ if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "could not de-assert reset\n"); return (error); } } /* Activate clock */ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_core); if (error != 0) { device_printf(dev, "could not find clock\n"); return (error); } error = clk_enable(sc->clk_core); if (error != 0) { device_printf(dev, "could not enable clock\n"); return (error); } sc->reg_data = TWI_DATA; sc->reg_slave_addr = TWI_ADDR; sc->reg_slave_ext_addr = TWI_XADDR; sc->reg_control = TWI_CNTR; sc->reg_status = TWI_STAT; sc->reg_baud_rate = TWI_CCR; sc->reg_soft_reset = TWI_SRST; if (ofw_bus_is_compatible(dev, "allwinner,sun6i-a31-i2c") || ofw_bus_is_compatible(dev, "allwinner,sun6i-a83t-i2c")) sc->iflag_w1c = true; return (twsi_attach(dev)); } static phandle_t a10_twsi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static device_method_t a10_twsi_methods[] = { /* device interface */ DEVMETHOD(device_probe, a10_twsi_probe), 
DEVMETHOD(device_attach, a10_twsi_attach), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, a10_twsi_get_node), { 0, 0 } }; DEFINE_CLASS_1(iichb, a10_twsi_driver, a10_twsi_methods, sizeof(struct twsi_softc), twsi_driver); EARLY_DRIVER_MODULE(a10_twsi, simplebus, a10_twsi_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); EARLY_DRIVER_MODULE(iicbus, a10_twsi, iicbus_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); MODULE_DEPEND(a10_twsi, iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/iicbus/controller/twsi/mv_twsi.c b/sys/dev/iicbus/controller/twsi/mv_twsi.c index 23f3ee8debd2..b3032533da47 100644 --- a/sys/dev/iicbus/controller/twsi/mv_twsi.c +++ b/sys/dev/iicbus/controller/twsi/mv_twsi.c @@ -1,227 +1,227 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for the TWSI (aka I2C, aka IIC) bus controller found on Marvell * and Allwinner SoCs. Supports master operation only, and works in polling mode. * * Calls to DELAY() are needed per Application Note AN-179 "TWSI Software * Guidelines for Discovery(TM), Horizon (TM) and Feroceon(TM) Devices". */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "iicbus_if.h" #define MV_TWSI_NAME "twsi" #define IICBUS_DEVNAME "iicbus" #define TWSI_ADDR 0x00 #define TWSI_DATA 0x04 #define TWSI_CNTR 0x08 #define TWSI_XADDR 0x10 #define TWSI_STAT 0x0c #define TWSI_BAUD_RATE 0x0c #define TWSI_SRST 0x1c #define TWSI_BAUD_RATE_RAW(C,M,N) ((C)/((10*(M+1))<<(N+1))) #define TWSI_BAUD_RATE_SLOW 50000 /* 50kHz */ #define TWSI_BAUD_RATE_FAST 100000 /* 100kHz */ #define TWSI_DEBUG #undef TWSI_DEBUG #ifdef TWSI_DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
#endif static phandle_t mv_twsi_get_node(device_t, device_t); static int mv_twsi_probe(device_t); static int mv_twsi_attach(device_t); static struct ofw_compat_data compat_data[] = { { "mrvl,twsi", true }, { "marvell,mv64xxx-i2c", true }, { "marvell,mv78230-i2c", true }, { NULL, false } }; static device_method_t mv_twsi_methods[] = { /* device interface */ DEVMETHOD(device_probe, mv_twsi_probe), DEVMETHOD(device_attach, mv_twsi_attach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, mv_twsi_get_node), DEVMETHOD_END }; DEFINE_CLASS_1(twsi, mv_twsi_driver, mv_twsi_methods, sizeof(struct twsi_softc), twsi_driver); DRIVER_MODULE(twsi, simplebus, mv_twsi_driver, 0, 0); DRIVER_MODULE(iicbus, twsi, iicbus_driver, 0, 0); MODULE_DEPEND(twsi, iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); static phandle_t mv_twsi_get_node(device_t bus, device_t dev) { /* Used by ofw_iicbus. */ return (ofw_bus_get_node(bus)); } static int mv_twsi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Marvell Integrated I2C Bus Controller"); return (BUS_PROBE_DEFAULT); } #define ABSSUB(a,b) (((a) > (b)) ? (a) - (b) : (b) - (a)) static void mv_twsi_cal_baud_rate(struct twsi_softc *sc, const uint32_t target, struct twsi_baud_rate *rate) { uint64_t clk; uint32_t cur, diff, diff0; int m, n, m0, n0; /* Calculate baud rate. 
*/ m0 = n0 = 4; /* Default values on reset */ diff0 = 0xffffffff; clk_get_freq(sc->clk_core, &clk); for (n = 0; n < 8; n++) { for (m = 0; m < 16; m++) { cur = TWSI_BAUD_RATE_RAW(clk,m,n); diff = ABSSUB(target, cur); if (diff < diff0) { m0 = m; n0 = n; diff0 = diff; } } } rate->raw = TWSI_BAUD_RATE_RAW(clk, m0, n0); rate->param = TWSI_BAUD_RATE_PARAM(m0, n0); rate->m = m0; rate->n = n0; } static int mv_twsi_attach(device_t dev) { struct twsi_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; /* Activate clock */ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_core); if (error != 0) { device_printf(dev, "could not find core clock\n"); return (error); } error = clk_enable(sc->clk_core); if (error != 0) { device_printf(dev, "could not enable core clock\n"); return (error); } if (clk_get_by_ofw_index(dev, 0, 1, &sc->clk_reg) == 0) { error = clk_enable(sc->clk_reg); if (error != 0) { device_printf(dev, "could not enable core clock\n"); return (error); } } mv_twsi_cal_baud_rate(sc, TWSI_BAUD_RATE_SLOW, &sc->baud_rate[IIC_SLOW]); mv_twsi_cal_baud_rate(sc, TWSI_BAUD_RATE_FAST, &sc->baud_rate[IIC_FAST]); if (bootverbose) device_printf(dev, "calculated baud rates are:\n" " %" PRIu32 " kHz (M=%d, N=%d) for slow,\n" " %" PRIu32 " kHz (M=%d, N=%d) for fast.\n", sc->baud_rate[IIC_SLOW].raw / 1000, sc->baud_rate[IIC_SLOW].m, sc->baud_rate[IIC_SLOW].n, sc->baud_rate[IIC_FAST].raw / 1000, sc->baud_rate[IIC_FAST].m, sc->baud_rate[IIC_FAST].n); sc->reg_data = TWSI_DATA; sc->reg_slave_addr = TWSI_ADDR; sc->reg_slave_ext_addr = TWSI_XADDR; sc->reg_control = TWSI_CNTR; sc->reg_status = TWSI_STAT; sc->reg_baud_rate = TWSI_BAUD_RATE; sc->reg_soft_reset = TWSI_SRST; return (twsi_attach(dev)); } diff --git a/sys/dev/iicbus/controller/twsi/twsi.h b/sys/dev/iicbus/controller/twsi/twsi.h index 132a75fd0ab8..54aec51bbb76 100644 --- a/sys/dev/iicbus/controller/twsi/twsi.h +++ b/sys/dev/iicbus/controller/twsi/twsi.h @@ -1,82 +1,82 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL 
LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _TWSI_H_ #define _TWSI_H_ -#include +#include struct twsi_baud_rate { uint32_t raw; int param; int m; int n; }; struct twsi_softc { device_t dev; struct resource *res[2]; struct mtx mutex; device_t iicbus; clk_t clk_core; clk_t clk_reg; void * intrhand; bool have_intr; struct iic_msg *msgs; uint32_t nmsgs; uint32_t msg_idx; uint16_t sent_bytes; uint16_t recv_bytes; int transfer; int error; int debug; uint32_t control_val; bool iflag_w1c; bus_size_t reg_data; bus_size_t reg_slave_addr; bus_size_t reg_slave_ext_addr; bus_size_t reg_control; bus_size_t reg_status; bus_size_t reg_baud_rate; bus_size_t reg_soft_reset; struct twsi_baud_rate baud_rate[IIC_FASTEST + 1]; }; DECLARE_CLASS(twsi_driver); #define TWSI_BAUD_RATE_PARAM(M,N) ((((M) << 3) | ((N) & 0x7)) & 0x7f) int twsi_attach(device_t); int twsi_detach(device_t); #endif /* _TWSI_H_ */ diff --git a/sys/dev/iicbus/pmic/rockchip/rk8xx_clocks.c b/sys/dev/iicbus/pmic/rockchip/rk8xx_clocks.c index c450154001f6..c65a41294c96 100644 --- a/sys/dev/iicbus/pmic/rockchip/rk8xx_clocks.c +++ b/sys/dev/iicbus/pmic/rockchip/rk8xx_clocks.c @@ -1,147 +1,147 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018-2021 Emmanuel Vadot * Copyright (c) 2021 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include -#include +#include #include /* Clock class and method */ struct rk8xx_clk_sc { device_t base_dev; }; #define CLK32OUT_REG 0x20 #define CLK32OUT_CLKOUT2_EN 1 static int rk8xx_clk_set_gate_1(struct clknode *clk, bool enable) { struct rk8xx_clk_sc *sc; uint8_t val; sc = clknode_get_softc(clk); rk8xx_read(sc->base_dev, CLK32OUT_REG, &val, sizeof(val)); if (enable) val |= CLK32OUT_CLKOUT2_EN; else val &= ~CLK32OUT_CLKOUT2_EN; rk8xx_write(sc->base_dev, CLK32OUT_REG, &val, 1); return (0); } static int rk8xx_clk_recalc(struct clknode *clk, uint64_t *freq) { *freq = 32768; return (0); } static clknode_method_t rk8xx_clk_clknode_methods_0[] = { CLKNODEMETHOD(clknode_recalc_freq, rk8xx_clk_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk8xx_clk_clknode_0, rk8xx_clk_clknode_class_0, rk8xx_clk_clknode_methods_0, sizeof(struct rk8xx_clk_sc), clknode_class); static clknode_method_t rk8xx_clk_clknode_methods_1[] = { CLKNODEMETHOD(clknode_set_gate, rk8xx_clk_set_gate_1), CLKNODEMETHOD_END }; DEFINE_CLASS_1(rk8xx_clk_clknode_1, rk8xx_clk_clknode_class_1, rk8xx_clk_clknode_methods_1, sizeof(struct rk8xx_clk_sc), rk8xx_clk_clknode_class_0); int rk8xx_attach_clocks(struct rk8xx_softc *sc) { struct clkdom *clkdom; struct clknode_init_def clkidef; struct clknode *clk; struct rk8xx_clk_sc *clksc; const char **clknames; phandle_t node; int nclks, rv; node = ofw_bus_get_node(sc->dev); /* clock-output-names are optional. 
Could use them for clkidef.name. */ nclks = ofw_bus_string_list_to_array(node, "clock-output-names", &clknames); clkdom = clkdom_create(sc->dev); memset(&clkidef, 0, sizeof(clkidef)); clkidef.id = 0; clkidef.name = (nclks == 2) ? clknames[0] : "clk32kout1"; clk = clknode_create(clkdom, &rk8xx_clk_clknode_class_0, &clkidef); if (clk == NULL) { device_printf(sc->dev, "Cannot create '%s'.\n", clkidef.name); return (ENXIO); } clksc = clknode_get_softc(clk); clksc->base_dev = sc->dev; clknode_register(clkdom, clk); memset(&clkidef, 0, sizeof(clkidef)); clkidef.id = 1; clkidef.name = (nclks == 2) ? clknames[1] : "clk32kout2"; clk = clknode_create(clkdom, &rk8xx_clk_clknode_class_1, &clkidef); if (clk == NULL) { device_printf(sc->dev, "Cannot create '%s'.\n", clkidef.name); return (ENXIO); } clksc = clknode_get_softc(clk); clksc->base_dev = sc->dev; clknode_register(clkdom, clk); rv = clkdom_finit(clkdom); if (rv != 0) { device_printf(sc->dev, "Cannot finalize clkdom initialization: " "%d\n", rv); return (ENXIO); } if (bootverbose) clkdom_dump(clkdom); return (0); } diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c index 8cd50d2cc898..fd55ce5c0cd4 100644 --- a/sys/dev/mmc/host/dwmmc.c +++ b/sys/dev/mmc/host/dwmmc.c @@ -1,1578 +1,1578 @@ /*- * Copyright (c) 2014-2019 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Synopsys DesignWare Mobile Storage Host Controller * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "opt_mmccam.h" #ifdef MMCCAM #include #include #include #include #include #include "mmc_sim_if.h" #endif #include "mmcbr_if.h" #ifdef DEBUG #define dprintf(fmt, args...) printf(fmt, ##args) #else #define dprintf(x, arg...) 
#endif #define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ bus_write_4((_sc)->res[0], _reg, _val) #define DIV_ROUND_UP(n, d) howmany(n, d) #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define DWMMC_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "dwmmc", MTX_DEF) #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CARD_INIT_DONE 0x04 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \ |SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE) #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \ |SDMMC_INTMASK_RE) #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \ |SDMMC_INTMASK_HLE) #define DES0_DIC (1 << 1) /* Disable Interrupt on Completion */ #define DES0_LD (1 << 2) /* Last Descriptor */ #define DES0_FS (1 << 3) /* First Descriptor */ #define DES0_CH (1 << 4) /* second address CHained */ #define DES0_ER (1 << 5) /* End of Ring */ #define DES0_CES (1 << 30) /* Card Error Summary */ #define DES0_OWN (1 << 31) /* OWN */ #define DES1_BS1_MASK 0x1fff struct idmac_desc { uint32_t des0; /* control */ uint32_t des1; /* bufsize */ uint32_t des2; /* buf1 phys addr */ uint32_t des3; /* buf2 phys addr or next descr */ }; #define IDMAC_DESC_SEGS (PAGE_SIZE / (sizeof(struct idmac_desc))) #define IDMAC_DESC_SIZE (sizeof(struct idmac_desc) * IDMAC_DESC_SEGS) #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */ /* * Size field in DMA descriptor is 13 bits long (up to 4095 bytes), * but must be a multiple of the data bus size.Additionally, we must ensure * that bus_dmamap_load() doesn't additionally fragments buffer (because it * is processed with page size granularity). 
Thus limit fragment size to half * of page. * XXX switch descriptor format to array and use second buffer pointer for * second half of page */ #define IDMAC_MAX_SIZE 2048 /* * Busdma may bounce buffers, so we must reserve 2 descriptors * (on start and on end) for bounced fragments. */ #define DWMMC_MAX_DATA (IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE static void dwmmc_next_operation(struct dwmmc_softc *); static int dwmmc_setup_bus(struct dwmmc_softc *, int); static int dma_done(struct dwmmc_softc *, struct mmc_command *); static int dma_stop(struct dwmmc_softc *); static void pio_read(struct dwmmc_softc *, struct mmc_command *); static void pio_write(struct dwmmc_softc *, struct mmc_command *); static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present); static struct resource_spec dwmmc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define HWTYPE_MASK (0x0000ffff) #define HWFLAG_MASK (0xffff << 16) static void dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (nsegs != 1) panic("%s: nsegs != 1 (%d)\n", __func__, nsegs); if (error != 0) panic("%s: error != 0 (%d)\n", __func__, error); *(bus_addr_t *)arg = segs[0].ds_addr; } static void dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct dwmmc_softc *sc; int idx; sc = arg; dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len); if (error != 0) panic("%s: error != 0 (%d)\n", __func__, error); for (idx = 0; idx < nsegs; idx++) { sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH; sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK; sc->desc_ring[idx].des2 = segs[idx].ds_addr; if (idx == 0) sc->desc_ring[idx].des0 |= DES0_FS; if (idx == (nsegs - 1)) { sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH); sc->desc_ring[idx].des0 |= DES0_LD; } wmb(); sc->desc_ring[idx].des0 |= DES0_OWN; } } static int dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits) { int reg; int i; reg = 
READ4(sc, SDMMC_CTRL); reg |= (reset_bits); WRITE4(sc, SDMMC_CTRL, reg); /* Wait reset done */ for (i = 0; i < 100; i++) { if (!(READ4(sc, SDMMC_CTRL) & reset_bits)) return (0); DELAY(10); } device_printf(sc->dev, "Reset failed\n"); return (1); } static int dma_setup(struct dwmmc_softc *sc) { int error; int nidx; int idx; /* * Set up TX descriptor ring, descriptors, and dma maps. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */ IDMAC_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->desc_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->desc_map); if (error != 0) { device_printf(sc->dev, "could not allocate descriptor ring.\n"); return (1); } error = bus_dmamap_load(sc->desc_tag, sc->desc_map, sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr, &sc->desc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load descriptor ring map.\n"); return (1); } for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) { sc->desc_ring[idx].des0 = DES0_CH; sc->desc_ring[idx].des1 = 0; nidx = (idx + 1) % IDMAC_DESC_SEGS; sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ (nidx * sizeof(struct idmac_desc)); } sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr; sc->desc_ring[idx - 1].des0 |= DES0_ER; error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ 8, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */ IDMAC_DESC_SEGS, /* nsegments */ IDMAC_MAX_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->buf_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); return (1); } return (0); } static void dwmmc_cmd_done(struct dwmmc_softc *sc) { struct mmc_command *cmd; #ifdef MMCCAM union ccb *ccb; #endif #ifdef MMCCAM ccb = sc->ccb; if (ccb == NULL) return; cmd = &ccb->mmcio.cmd; #else cmd = sc->curcmd; #endif if (cmd == NULL) return; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = READ4(sc, SDMMC_RESP0); cmd->resp[2] = READ4(sc, SDMMC_RESP1); cmd->resp[1] = READ4(sc, SDMMC_RESP2); cmd->resp[0] = READ4(sc, SDMMC_RESP3); } else { cmd->resp[3] = 0; cmd->resp[2] = 0; cmd->resp[1] = 0; cmd->resp[0] = READ4(sc, SDMMC_RESP0); } } } static void dwmmc_tasklet(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (!sc->cmd_done) return; if (cmd->error != MMC_ERR_NONE || !cmd->data) { dwmmc_next_operation(sc); } else if (cmd->data && sc->dto_rcvd) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) { if (sc->acd_rcvd) dwmmc_next_operation(sc); } else { dwmmc_next_operation(sc); } } } static void dwmmc_intr(void *arg) { struct mmc_command *cmd; struct dwmmc_softc *sc; uint32_t reg; sc = arg; DWMMC_LOCK(sc); cmd = sc->curcmd; /* First handle SDMMC controller interrupts */ reg = READ4(sc, SDMMC_MINTSTS); if (reg) { dprintf("%s 0x%08x\n", __func__, reg); if (reg & DWMMC_CMD_ERR_FLAGS) { dprintf("cmd err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = 
MMC_ERR_TIMEOUT; } if (reg & DWMMC_DATA_ERR_FLAGS) { dprintf("data err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_FAILED; if (!sc->use_pio) { dma_done(sc, cmd); dma_stop(sc); } } if (reg & SDMMC_INTMASK_CMD_DONE) { dwmmc_cmd_done(sc); sc->cmd_done = 1; } if (reg & SDMMC_INTMASK_ACD) sc->acd_rcvd = 1; if (reg & SDMMC_INTMASK_DTO) sc->dto_rcvd = 1; if (reg & SDMMC_INTMASK_CD) { dwmmc_handle_card_present(sc, READ4(sc, SDMMC_CDETECT) == 0 ? true : false); } } /* Ack interrupts */ WRITE4(sc, SDMMC_RINTSTS, reg); if (sc->use_pio) { if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { pio_read(sc, cmd); } if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { pio_write(sc, cmd); } } else { /* Now handle DMA interrupts */ reg = READ4(sc, SDMMC_IDSTS); if (reg) { dprintf("dma intr 0x%08x\n", reg); if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)); WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); dma_done(sc, cmd); } } } dwmmc_tasklet(sc); DWMMC_UNLOCK(sc); } static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present) { bool was_present; if (dumping || SCHEDULER_STOPPED()) return; was_present = sc->child != NULL; if (!was_present && is_present) { taskqueue_enqueue_timeout(taskqueue_swi_giant, &sc->card_delayed_task, -(hz / 2)); } else if (was_present && !is_present) { taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task); } } static void dwmmc_card_task(void *arg, int pending __unused) { struct dwmmc_softc *sc = arg; #ifdef MMCCAM mmc_cam_sim_discover(&sc->mmc_sim); #else DWMMC_LOCK(sc); if (READ4(sc, SDMMC_CDETECT) == 0 || (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) { if (sc->child == NULL) { if (bootverbose) device_printf(sc->dev, "Card inserted\n"); sc->child = device_add_child(sc->dev, "mmc", -1); DWMMC_UNLOCK(sc); if (sc->child) { device_set_ivars(sc->child, sc); (void)device_probe_and_attach(sc->child); } } else DWMMC_UNLOCK(sc); } else { /* Card isn't present, detach if 
 necessary */
		if (sc->child != NULL) {
			if (bootverbose)
				device_printf(sc->dev, "Card removed\n");
			/* Drop the lock before tearing down the child. */
			DWMMC_UNLOCK(sc);
			device_delete_child(sc->dev, sc->child);
			sc->child = NULL;
		} else
			DWMMC_UNLOCK(sc);
	}
#endif /* MMCCAM */
}

/*
 * Parse the device-tree node: host defaults, fifo depth, clocks
 * (biu/ciu), optional IP reset and vmmc/vqmmc regulators.  Also
 * performs the reset/clock/regulator bring-up in the required order
 * (assert reset -> enable clocks -> enable regulators -> deassert
 * reset).  Returns 0 on success, ENXIO on any failure.
 */
static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	uint32_t bus_hz = 0;
	int len;
	int error;

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* Set some defaults for freq and supported mode */
	sc->host.f_min = 400000;
	sc->host.f_max = 200000000;
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
		OF_getencprop(node, "fifo-depth", dts_value, len);
		sc->fifo_depth = dts_value[0];
	}

	/* num-slots (Deprecated) */
	sc->num_slots = 1;
	if ((len = OF_getproplen(node, "num-slots")) > 0) {
		device_printf(sc->dev, "num-slots property is deprecated\n");
		OF_getencprop(node, "num-slots", dts_value, len);
		sc->num_slots = dts_value[0];
	}

	/* clock-frequency */
	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
		OF_getencprop(node, "clock-frequency", dts_value, len);
		bus_hz = dts_value[0];
	}

	/* IP block reset is optional */
	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
	if (error != 0 &&
	    error != ENOENT &&
	    error != ENODEV) {
		device_printf(sc->dev, "Cannot get reset\n");
		goto fail;
	}

	/* vmmc regulator is optional */
	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
	     &sc->vmmc);
	if (error != 0 &&
	    error != ENOENT &&
	    error != ENODEV) {
		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
		goto fail;
	}

	/* vqmmc regulator is optional */
	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
	     &sc->vqmmc);
	if (error != 0 &&
	    error != ENOENT &&
	    error != ENODEV) {
		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
		goto fail;
	}

	/* Assert reset first */
	if (sc->hwreset != NULL) {
		error = hwreset_assert(sc->hwreset);
		if (error != 0) {
			device_printf(sc->dev, "Cannot assert reset\n");
			goto fail;
		}
	}

	/* BIU (Bus Interface Unit clock) is optional */
	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
	if (error != 0 &&
	    error != ENOENT &&
	    error != ENODEV) {
		device_printf(sc->dev, "Cannot get 'biu' clock\n");
		goto fail;
	}

	if (sc->biu) {
		error = clk_enable(sc->biu);
		if (error != 0) {
			device_printf(sc->dev, "cannot enable biu clock\n");
			goto fail;
		}
	}

	/*
	 * CIU (Controller Interface Unit clock) is mandatory
	 * if no clock-frequency property is given
	 */
	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
	if (error != 0 &&
	    error != ENOENT &&
	    error != ENODEV) {
		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
		goto fail;
	}

	if (sc->ciu) {
		if (bus_hz != 0) {
			error = clk_set_freq(sc->ciu, bus_hz, 0);
			if (error != 0)
				device_printf(sc->dev,
				    "cannot set ciu clock to %u\n", bus_hz);
		}
		error = clk_enable(sc->ciu);
		if (error != 0) {
			device_printf(sc->dev, "cannot enable ciu clock\n");
			goto fail;
		}
		/* Record the actual rate the clock framework settled on. */
		clk_get_freq(sc->ciu, &sc->bus_hz);
	}

	/* Enable regulators */
	if (sc->vmmc != NULL) {
		error = regulator_enable(sc->vmmc);
		if (error != 0) {
			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
			goto fail;
		}
	}
	if (sc->vqmmc != NULL) {
		error = regulator_enable(sc->vqmmc);
		if (error != 0) {
			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
			goto fail;
		}
	}

	/* Take dwmmc out of reset */
	if (sc->hwreset != NULL) {
		error = hwreset_deassert(sc->hwreset);
		if (error != 0) {
			device_printf(sc->dev, "Cannot deassert reset\n");
			goto fail;
		}
	}

	if (sc->bus_hz == 0) {
		device_printf(sc->dev, "No bus speed provided\n");
		goto fail;
	}

	return (0);
fail:
	return (ENXIO);
}

/*
 * Common attach entry point, called by the SoC-specific front ends
 * (e.g. the Rockchip driver below) after they set up sc->hwtype and
 * sc->update_ios.
 */
int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Why not to use Auto Stop?
 It saves on the order of a hundred interrupts per second */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
		READ4(sc, SDMMC_VERID) & 0xffff);

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
				  SDMMC_CTRL_FIFO_RESET |
				  SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	/* Start the card clock at the minimum supported frequency. */
	dwmmc_setup_bus(sc, sc->host.f_min);

	/*
	 * No fifo-depth property: recover the depth from the reset value
	 * of the FIFOTH RX watermark field, which is depth - 1.
	 */
	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		dma_stop(sc);
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
					   SDMMC_IDINTEN_RI |
					   SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrups for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
	    SDMMC_INTMASK_DTO | SDMMC_INTMASK_ACD | SDMMC_INTMASK_TXDR |
	    SDMMC_INTMASK_RXDR | DWMMC_ERR_FLAGS | SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
		dwmmc_card_task, sc);

#ifdef MMCCAM
	sc->ccb = NULL;
	if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
		device_printf(dev, "cannot
 alloc cam sim\n");
		dwmmc_detach(dev);
		return (ENXIO);
	}
#endif
	/*
	 * Schedule a card detection as we won't get an interrupt
	 * if the card is inserted when we attach
	 */
	dwmmc_card_task(sc, 0);
	return (0);
}

/*
 * Detach: delete children, drain the card tasks, tear down the
 * interrupt and resources, then quiesce reset/clocks/regulators.
 * Failures past the child/interrupt teardown are only logged.
 */
int
dwmmc_detach(device_t dev)
{
	struct dwmmc_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = device_delete_children(dev);
	if (ret != 0)
		return (ret);

	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);

	if (sc->intr_cookie != NULL) {
		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
		if (ret != 0)
			return (ret);
	}
	bus_release_resources(dev, dwmmc_spec, sc->res);

	DWMMC_LOCK_DESTROY(sc);

	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
		device_printf(sc->dev, "cannot deassert reset\n");
	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
		device_printf(sc->dev, "cannot disable biu clock\n");
	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
		device_printf(sc->dev, "cannot disable ciu clock\n");

	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");

#ifdef MMCCAM
	mmc_cam_sim_free(&sc->mmc_sim);
#endif

	return (0);
}

/*
 * Program the card clock to 'freq' Hz (0 gates the clock).  Each
 * divider/enable change must be pushed to the card-clock domain with
 * an UPD_CLK_ONLY command; we poll for START to self-clear with a
 * bounded busy-wait.  Returns 0 on success, 1 on timeout.
 */
static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev, "Failed update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);

	/* card clock = bus_hz / (2 * CLKDIV); 0 means bypass (1:1). */
	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
		SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

/*
 * mmcbr update_ios: apply power mode, bus width, (Exynos) timing and
 * DDR settings, call the SoC-specific hook if any, then reprogram the
 * card clock.
 */
static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t reg;
	int ret = 0;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d, timming: %d\n",
		ios->clock, ios->bus_width, ios->timing);

	switch (ios->power_mode) {
	case power_on:
		break;
	case power_off:
		WRITE4(sc, SDMMC_PWREN, 0);
		break;
	case power_up:
		WRITE4(sc, SDMMC_PWREN, 1);
		break;
	}

	mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take care about DDR or SDR use here */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/* Set DDR mode */
	reg = READ4(sc, SDMMC_UHS_REG);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52 ||
	    ios->timing == bus_timing_mmc_hs400)
		reg |= (SDMMC_UHS_REG_DDR);
	else
		reg &= ~(SDMMC_UHS_REG_DDR);
	WRITE4(sc, SDMMC_UHS_REG, reg);

	if (sc->update_ios)
		ret = sc->update_ios(sc, ios);

	dwmmc_setup_bus(sc, ios->clock);

	return (ret);
}

/*
 * Finish a DMA transfer: sync and unload the buffer map.
 * Caller guarantees cmd and cmd->data are non-NULL.
 */
static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag,
 sc->buf_map, BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTREAD);

	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

/*
 * Disable the internal DMA controller and soft-reset it, returning
 * the controller to slave (FIFO) mode.
 */
static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

/*
 * Set up an IDMAC transfer for cmd->data: mask the PIO drain/fill
 * interrupts, load and sync the buffer map (dwmmc_ring_setup fills
 * the descriptor ring), program the FIFO watermarks and kick the
 * poll-demand register to start the engine.
 */
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);
	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	/* Watermarks at half the FIFO depth. */
	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

/*
 * Set up a PIO transfer: reset the progress counter and program the
 * FIFO watermarks; the actual data moves in pio_read()/pio_write().
 */
static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

/*
 * Drain the RX FIFO one 32-bit word at a time into the data buffer,
 * stopping when the FIFO is empty or the transfer is complete, then
 * ack RXDR.  Safe to call with a NULL or data-less cmd.
 */
static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

/*
 * Fill the TX FIFO one 32-bit word at a time from the data buffer,
 * stopping when the FIFO is full or the transfer is complete, then
 * ack TXDR.  Safe to call with a NULL or data-less cmd.
 */
static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

/*
 * Build the CMD register value for 'cmd', program block size/count
 * and the DMA or PIO machinery for data commands, then fire the
 * command.  Must be called with the softc lock held.
 */
static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	dprintf("%s\n", __func__);
	sc->curcmd = cmd;
	data = cmd->data;

#ifndef MMCCAM
	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;
#endif
	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling.
 */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	/* First command after attach carries the 80-clock init sequence. */
	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
#ifdef MMCCAM
		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
			WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
			WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
		} else
#endif
		{
			WRITE4(sc, SDMMC_BYTCNT, data->len);
			blksz = (data->len < MMC_SECTOR_SIZE) ? \
				 data->len : MMC_SECTOR_SIZE;
			WRITE4(sc, SDMMC_BLKSIZ, blksz);
		}

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
};

/*
 * Advance the request state machine: clear the per-command completion
 * flags, wait for the card to go non-busy, then either start the next
 * pending command or complete the request back to the upper layer
 * (CAM ccb or mmc request callback).  Called with the lock held.
 */
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;
	dprintf("%s\n", __func__);
#ifdef MMCCAM
	union ccb *ccb;

	ccb = sc->ccb;
	if (ccb == NULL)
		return;
	cmd = &ccb->mmcio.cmd;
#else
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;
	cmd = req->cmd;
#endif

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait until card is still busy.
	 * We do need this to prevent data timeouts,
	 * mostly caused by multi-block write command
	 * followed by single-read.
	 */
	while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		/// XXX: What to do with this?
		//dwmmc_start_cmd(sc, req->stop);
		return;
	}

#ifdef MMCCAM
	sc->ccb = NULL;
	sc->curcmd = NULL;
	ccb->ccb_h.status =
		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
	xpt_done(ccb);
#else
	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
#endif
}

/*
 * mmcbr request entry point: queue the request (rejecting overlap
 * with EBUSY in the non-CAM case) and kick the state machine.
 */
static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

#ifdef MMCCAM
	sc->flags |= PENDING_CMD;
#else
	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
#endif
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);

	return (0);
}

#ifndef MMCCAM
/* Read-only detection is not wired up; report writable. */
static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

/* Sleep until the bus is free, then claim it. */
static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

/* Release the bus and wake any waiter in dwmmc_acquire_host(). */
static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}
#endif	/* !MMCCAM */

/* mmcbr ivar accessors: expose host/ios state to the mmc child. */
static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_VCCQ:
		*(int *)result = sc->host.ios.vccq;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = DWMMC_MAX_DATA;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->host.ios.timing;
		break;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->host.ios.timing = value;
		break;
	case MMCBR_IVAR_VCCQ:
		sc->host.ios.vccq = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

#ifdef MMCCAM
/* Note: this function likely belongs to the specific driver impl */
static int
dwmmc_switch_vccq(device_t dev, device_t child)
{
	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
	return EINVAL;
}

static int
dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct dwmmc_softc *sc; sc = device_get_softc(dev); cts->host_ocr = sc->host.host_ocr; cts->host_f_min = sc->host.f_min; cts->host_f_max = sc->host.f_max; cts->host_caps = sc->host.caps; cts->host_max_data = DWMMC_MAX_DATA; memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios)); return (0); } static int dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct dwmmc_softc *sc; struct mmc_ios *ios; struct mmc_ios *new_ios; int res; sc = device_get_softc(dev); ios = &sc->host.ios; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = new_ios->clock; if (bootverbose) device_printf(sc->dev, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; if (bootverbose) device_printf(sc->dev, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; if (bootverbose) device_printf(sc->dev, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; if (bootverbose) device_printf(sc->dev, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; if (bootverbose) device_printf(sc->dev, "Power mode => %d\n", ios->power_mode); } if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; if (bootverbose) device_printf(sc->dev, "Timing => %d\n", ios->timing); } if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; if (bootverbose) device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode); } if (cts->ios_valid & MMC_VCCQ) { ios->vccq = new_ios->vccq; if (bootverbose) device_printf(sc->dev, "VCCQ => %d\n", ios->vccq); res = dwmmc_switch_vccq(sc->dev, NULL); device_printf(sc->dev, "VCCQ switch result: %d\n", res); } return (dwmmc_update_ios(sc->dev, NULL)); } static int dwmmc_cam_request(device_t dev, union ccb *ccb) { struct dwmmc_softc *sc; struct 
ccb_mmcio *mmcio; sc = device_get_softc(dev); mmcio = &ccb->mmcio; DWMMC_LOCK(sc); #ifdef DEBUG if (__predict_false(bootverbose)) { device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0); } #endif if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } if (sc->ccb != NULL) { device_printf(sc->dev, "Controller still has an active command\n"); return (EBUSY); } sc->ccb = ccb; DWMMC_UNLOCK(sc); dwmmc_request(sc->dev, NULL, NULL); return (0); } static void dwmmc_cam_poll(device_t dev) { struct dwmmc_softc *sc; sc = device_get_softc(dev); dwmmc_intr(sc); } #endif /* MMCCAM */ static device_method_t dwmmc_methods[] = { /* Bus interface */ DEVMETHOD(bus_read_ivar, dwmmc_read_ivar), DEVMETHOD(bus_write_ivar, dwmmc_write_ivar), #ifndef MMCCAM /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios), DEVMETHOD(mmcbr_request, dwmmc_request), DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro), DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host), DEVMETHOD(mmcbr_release_host, dwmmc_release_host), #endif #ifdef MMCCAM /* MMCCAM interface */ DEVMETHOD(mmc_sim_get_tran_settings, dwmmc_get_tran_settings), DEVMETHOD(mmc_sim_set_tran_settings, dwmmc_set_tran_settings), DEVMETHOD(mmc_sim_cam_request, dwmmc_cam_request), DEVMETHOD(mmc_sim_cam_poll, dwmmc_cam_poll), DEVMETHOD(bus_add_child, bus_generic_add_child), #endif DEVMETHOD_END }; DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods, sizeof(struct dwmmc_softc)); diff --git a/sys/dev/mmc/host/dwmmc_rockchip.c b/sys/dev/mmc/host/dwmmc_rockchip.c index 5cec9f9603d1..c4b07ac3290e 100644 --- a/sys/dev/mmc/host/dwmmc_rockchip.c +++ b/sys/dev/mmc/host/dwmmc_rockchip.c @@ -1,140 +1,140 @@ /* * Copyright 2017 
Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include
+#include
#include
#include "opt_mmccam.h"

enum RKTYPE {
	RK2928 = 1,
	RK3288,
};

static struct ofw_compat_data compat_data[] = {
	{"rockchip,rk2928-dw-mshc",	RK2928},
	{"rockchip,rk3288-dw-mshc",	RK3288},
	{NULL,				0},
};

static int dwmmc_rockchip_update_ios(struct dwmmc_softc *sc, struct mmc_ios *ios);

/* Match against the Rockchip dw-mshc compatible strings. */
static int
rockchip_dwmmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Synopsys DesignWare Mobile "
	    "Storage Host Controller (RockChip)");

	return (BUS_PROBE_VENDOR);
}

/*
 * Rockchip front end: set the hwtype and the SoC-specific update_ios
 * hook, force PIO on RK2928, then hand off to the common attach.
 */
static int
rockchip_dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int type;

	sc = device_get_softc(dev);
	sc->hwtype = HWTYPE_ROCKCHIP;
	type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	switch (type) {
	case RK2928:
		sc->use_pio = 1;
		break;
	}

	sc->update_ios = &dwmmc_rockchip_update_ios;

	return (dwmmc_attach(dev));
}

/*
 * SoC hook: when the requested ios clock differs from the cached bus
 * clock, reprogram the ciu clock.
 */
static int
dwmmc_rockchip_update_ios(struct dwmmc_softc *sc, struct mmc_ios *ios)
{
	unsigned int clock;
	int error;

	if (ios->clock && ios->clock != sc->bus_hz) {
		sc->bus_hz = clock = ios->clock;
		/* Set the MMC clock.
 */
		if (sc->ciu) {
			/*
			 * Apparently you need to set the ciu clock to
			 * the double of bus_hz
			 */
			error = clk_set_freq(sc->ciu, clock * 2,
			    CLK_SET_ROUND_DOWN);
			if (error != 0) {
				device_printf(sc->dev,
				    "failed to set frequency to %u Hz: %d\n",
				    clock, error);
				return (error);
			}
		}
	}
	return (0);
}

static device_method_t rockchip_dwmmc_methods[] = {
	/* bus interface */
	DEVMETHOD(device_probe, rockchip_dwmmc_probe),
	DEVMETHOD(device_attach, rockchip_dwmmc_attach),
	DEVMETHOD(device_detach, dwmmc_detach),

	DEVMETHOD_END
};

DEFINE_CLASS_1(rockchip_dwmmc, rockchip_dwmmc_driver, rockchip_dwmmc_methods,
    sizeof(struct dwmmc_softc), dwmmc_driver);

DRIVER_MODULE(rockchip_dwmmc, simplebus, rockchip_dwmmc_driver, 0, 0);
DRIVER_MODULE(rockchip_dwmmc, ofwbus, rockchip_dwmmc_driver, NULL, NULL);
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(rockchip_dwmmc);
#endif
diff --git a/sys/dev/mmc/host/dwmmc_var.h b/sys/dev/mmc/host/dwmmc_var.h
index ef9b1d5305bd..16893b110804 100644
--- a/sys/dev/mmc/host/dwmmc_var.h
+++ b/sys/dev/mmc/host/dwmmc_var.h
@@ -1,103 +1,103 @@
/*-
 * Copyright (c) 2014 Ruslan Bukin
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef DEV_MMC_HOST_DWMMC_VAR_H
#define DEV_MMC_HOST_DWMMC_VAR_H

-#include
+#include
#include
#include

#include "opt_mmccam.h"

#include

/* SoC flavour of the dw_mmc block; selects quirks in the core driver. */
enum {
	HWTYPE_NONE,
	HWTYPE_ALTERA,
	HWTYPE_EXYNOS,
	HWTYPE_HISILICON,
	HWTYPE_ROCKCHIP,
};

/*
 * Per-instance state shared by the common dwmmc core and the
 * SoC-specific front-end drivers.
 */
struct dwmmc_softc {
	struct resource		*res[2];	/* [0] memory, [1] interrupt */
	device_t		dev;
	void			*intr_cookie;
	struct mmc_host		host;
	struct mmc_helper	mmc_helper;
	struct mtx		sc_mtx;		/* protects request state */
#ifdef MMCCAM
	union ccb *		ccb;		/* single in-flight ccb */
	struct mmc_sim		mmc_sim;
#else
	struct mmc_request	*req;		/* single in-flight request */
#endif
	struct mmc_command	*curcmd;	/* command being executed */
	uint32_t		flags;		/* PENDING_CMD etc. */
	uint32_t		hwtype;		/* HWTYPE_* above */
	uint32_t		use_auto_stop;	/* HW auto-stop for multiblock */
	uint32_t		use_pio;	/* PIO instead of IDMAC */
	device_t		child;		/* attached "mmc" bus, if any */
	struct task		card_task;	/* Card presence check task */
	struct timeout_task	card_delayed_task;/* Card insert delayed task */

	/* SoC-specific ios hook installed by the front end. */
	int			(*update_ios)(struct dwmmc_softc *sc, struct mmc_ios *ios);

	/* IDMAC descriptor ring and data buffer DMA state. */
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct idmac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	bus_dmamap_t		buf_map;

	uint32_t		bus_busy;	/* acquire/release_host count */
	uint32_t		dto_rcvd;	/* data-transfer-over seen */
	uint32_t		acd_rcvd;	/* auto-command-done seen */
	uint32_t		cmd_done;	/* command-done seen */
	uint64_t		bus_hz;		/* actual ciu clock rate */
	uint32_t		fifo_depth;
	uint32_t		num_slots;	/* deprecated num-slots prop */
	uint32_t		sdr_timing;
	uint32_t		ddr_timing;

	clk_t			biu;		/* bus interface unit clock */
	clk_t			ciu;		/* controller interface clock */
	hwreset_t		hwreset;	/* optional IP reset */
	regulator_t		vmmc;
	regulator_t		vqmmc;
};

DECLARE_CLASS(dwmmc_driver);

int
dwmmc_attach(device_t);
int dwmmc_detach(device_t);

#endif
diff --git a/sys/dev/mmc/mmc_pwrseq.c b/sys/dev/mmc/mmc_pwrseq.c
index 5e2e87fd5564..46bfab41aa6c 100644
--- a/sys/dev/mmc/mmc_pwrseq.c
+++ b/sys/dev/mmc/mmc_pwrseq.c
@@ -1,189 +1,189 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2021 Emmanuel Vadot
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
#include
#include
#include
#include

#include
#include
-#include
+#include

#include "mmc_pwrseq_if.h"

/* Which of the two DT-described power sequences this node implements. */
enum pwrseq_type {
	PWRSEQ_SIMPLE = 1,
	PWRSEQ_EMMC,
};

static struct ofw_compat_data compat_data[] = {
	{ "mmc-pwrseq-simple",	PWRSEQ_SIMPLE },
	{ "mmc-pwrseq-emmc",	PWRSEQ_EMMC },
	{ NULL,			0 }
};

struct mmc_pwrseq_softc {
	enum pwrseq_type	type;		/* PWRSEQ_SIMPLE or PWRSEQ_EMMC */
	clk_t		ext_clock;		/* optional external clock */
	struct gpiobus_pin	*reset_gpio;	/* optional reset line */

	uint32_t	post_power_on_delay_ms;
	uint32_t	power_off_delay_us;
};

static int
mmc_pwrseq_probe(device_t dev)
{
	enum pwrseq_type type;

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	type = (enum pwrseq_type)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	switch (type) {
	case PWRSEQ_SIMPLE:
		device_set_desc(dev, "MMC Simple Power sequence");
		break;
	case PWRSEQ_EMMC:
		device_set_desc(dev, "MMC eMMC Power sequence");
		break;
	}
	return (BUS_PROBE_DEFAULT);
}

/*
 * Parse the optional ext_clock, delay properties and reset-gpios,
 * park the reset line asserted, and register the node xref so
 * consumers can find us via mmc-pwrseq references.
 */
static int
mmc_pwrseq_attach(device_t dev)
{
	struct mmc_pwrseq_softc *sc;
	phandle_t node;
	int rv;

	sc = device_get_softc(dev);
	sc->type = (enum pwrseq_type)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	node = ofw_bus_get_node(dev);

	if (sc->type == PWRSEQ_SIMPLE) {
		if (OF_hasprop(node, "clocks")) {
			rv = clk_get_by_ofw_name(dev, 0, "ext_clock", &sc->ext_clock);
			if (rv != 0) {
				device_printf(dev,
				    "Node have a clocks property but no clocks named \"ext_clock\"\n");
				return (ENXIO);
			}
		}
		OF_getencprop(node, "post-power-on-delay-ms", &sc->post_power_on_delay_ms,
		    sizeof(uint32_t));
		OF_getencprop(node, "power-off-delay-us", &sc->power_off_delay_us,
		    sizeof(uint32_t));
	}

	if (OF_hasprop(node, "reset-gpios")) {
		if (gpio_pin_get_by_ofw_property(dev, node, "reset-gpios",
		    &sc->reset_gpio) != 0) {
			device_printf(dev, "Cannot get the reset-gpios\n");
			return (ENXIO);
		}
		gpio_pin_setflags(sc->reset_gpio, GPIO_PIN_OUTPUT);
		gpio_pin_set_active(sc->reset_gpio, true);
	}

	OF_device_register_xref(OF_xref_from_node(node), dev);
	return (0);
}

/* Detach refused: consumers may hold references to this node. */
static int
mmc_pwrseq_detach(device_t dev)
{

	return (EBUSY);
}

/*
 * mmc_pwrseq_if set_power method: on power-up enable the external
 * clock, deassert reset and honor the post-power-on delay; on
 * power-down do the reverse.  Returns 0 or the first error from the
 * clock/GPIO framework.
 */
static int
mmv_pwrseq_set_power(device_t dev, bool power_on)
{
	struct mmc_pwrseq_softc *sc;
	int rv;

	sc = device_get_softc(dev);

	if (power_on) {
		if (sc->ext_clock) {
			rv = clk_enable(sc->ext_clock);
			if (rv != 0)
				return (rv);
		}

		if (sc->reset_gpio) {
			rv = gpio_pin_set_active(sc->reset_gpio, false);
			if (rv != 0)
				return (rv);
		}

		if (sc->post_power_on_delay_ms)
			DELAY(sc->post_power_on_delay_ms * 1000);
	} else {
		if (sc->reset_gpio) {
			rv = gpio_pin_set_active(sc->reset_gpio, true);
			if (rv != 0)
				return (rv);
		}

		if (sc->ext_clock) {
			rv = clk_stop(sc->ext_clock);
			if (rv != 0)
				return (rv);
		}
		if (sc->power_off_delay_us)
			DELAY(sc->power_off_delay_us);
	}
	return (0);
}

static device_method_t mmc_pwrseq_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mmc_pwrseq_probe),
	DEVMETHOD(device_attach,	mmc_pwrseq_attach),
	DEVMETHOD(device_detach,	mmc_pwrseq_detach),

	DEVMETHOD(mmc_pwrseq_set_power,	mmv_pwrseq_set_power),

	DEVMETHOD_END
};

static driver_t mmc_pwrseq_driver = {
	"mmc_pwrseq",
	mmc_pwrseq_methods,
	sizeof(struct mmc_pwrseq_softc),
};

EARLY_DRIVER_MODULE(mmc_pwrseq, simplebus, mmc_pwrseq_driver, 0, 0,
    BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_FIRST);
MODULE_VERSION(mmc_pwrseq, 1);
SIMPLEBUS_PNP_INFO(compat_data);
diff --git a/sys/dev/neta/if_mvneta.c b/sys/dev/neta/if_mvneta.c
index f7f9c042b937..8a2c2ec8512c 100644
--- a/sys/dev/neta/if_mvneta.c
+++ b/sys/dev/neta/if_mvneta.c
@@ -1,3624 +1,3624 @@
/*
 * Copyright (c) 2017 Stormshield.
 * Copyright (c) 2017 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef MVNETA_KTR #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #if !defined(__aarch64__) #include #include #endif #include "if_mvnetareg.h" #include "if_mvnetavar.h" #include "miibus_if.h" #include "mdio_if.h" #ifdef MVNETA_DEBUG #define STATIC /* nothing */ #else #define STATIC static #endif #define DASSERT(x) KASSERT((x), (#x)) #define A3700_TCLK_250MHZ 250000000 /* Device Register Initialization */ STATIC int mvneta_initreg(if_t); /* Descriptor Ring Control for each of queues */ STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int); STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int); STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int); STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int); STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int); STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int); STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, 
int); STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int); STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int); STATIC int mvneta_dma_create(struct mvneta_softc *); /* Rx/Tx Queue Control */ STATIC int mvneta_rx_queue_init(if_t, int); STATIC int mvneta_tx_queue_init(if_t, int); STATIC int mvneta_rx_queue_enable(if_t, int); STATIC int mvneta_tx_queue_enable(if_t, int); STATIC void mvneta_rx_lockq(struct mvneta_softc *, int); STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int); STATIC void mvneta_tx_lockq(struct mvneta_softc *, int); STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int); /* Interrupt Handlers */ STATIC void mvneta_disable_intr(struct mvneta_softc *); STATIC void mvneta_enable_intr(struct mvneta_softc *); STATIC void mvneta_rxtxth_intr(void *); STATIC int mvneta_misc_intr(struct mvneta_softc *); STATIC void mvneta_tick(void *); /* struct ifnet and mii callbacks*/ STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **); STATIC int mvneta_xmit_locked(struct mvneta_softc *, int); #ifdef MVNETA_MULTIQUEUE STATIC int mvneta_transmit(if_t, struct mbuf *); #else /* !MVNETA_MULTIQUEUE */ STATIC void mvneta_start(if_t); #endif STATIC void mvneta_qflush(if_t); STATIC void mvneta_tx_task(void *, int); STATIC int mvneta_ioctl(if_t, u_long, caddr_t); STATIC void mvneta_init(void *); STATIC void mvneta_init_locked(void *); STATIC void mvneta_stop(struct mvneta_softc *); STATIC void mvneta_stop_locked(struct mvneta_softc *); STATIC int mvneta_mediachange(if_t); STATIC void mvneta_mediastatus(if_t, struct ifmediareq *); STATIC void mvneta_portup(struct mvneta_softc *); STATIC void mvneta_portdown(struct mvneta_softc *); /* Link State Notify */ STATIC void mvneta_update_autoneg(struct mvneta_softc *, int); STATIC int mvneta_update_media(struct mvneta_softc *, int); STATIC void mvneta_adjust_link(struct mvneta_softc *); STATIC void mvneta_update_eee(struct mvneta_softc *); STATIC void 
mvneta_update_fc(struct mvneta_softc *); STATIC void mvneta_link_isr(struct mvneta_softc *); STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t); STATIC void mvneta_linkup(struct mvneta_softc *); STATIC void mvneta_linkdown(struct mvneta_softc *); STATIC void mvneta_linkreset(struct mvneta_softc *); /* Tx Subroutines */ STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int); STATIC void mvneta_tx_set_csumflag(if_t, struct mvneta_tx_desc *, struct mbuf *); STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int); STATIC void mvneta_tx_drain(struct mvneta_softc *); /* Rx Subroutines */ STATIC int mvneta_rx(struct mvneta_softc *, int, int); STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int); STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int); STATIC void mvneta_rx_set_csumflag(if_t, struct mvneta_rx_desc *, struct mbuf *); STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *); /* MAC address filter */ STATIC void mvneta_filter_setup(struct mvneta_softc *); /* sysctl(9) */ STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS); STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS); STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS); STATIC void sysctl_mvneta_init(struct mvneta_softc *); /* MIB */ STATIC void mvneta_clear_mib(struct mvneta_softc *); STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int); STATIC void mvneta_update_mib(struct mvneta_softc *); /* Switch */ STATIC boolean_t mvneta_has_switch(device_t); #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx) #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx) STATIC struct mtx mii_mutex; STATIC int mii_init = 0; /* Device */ STATIC int mvneta_detach(device_t); /* MII */ STATIC int mvneta_miibus_readreg(device_t, int, int); STATIC int mvneta_miibus_writereg(device_t, int, int, int); static device_method_t mvneta_methods[] = { /* Device interface */ DEVMETHOD(device_detach, mvneta_detach), /* MII interface */ DEVMETHOD(miibus_readreg, 
mvneta_miibus_readreg), DEVMETHOD(miibus_writereg, mvneta_miibus_writereg), /* MDIO interface */ DEVMETHOD(mdio_readreg, mvneta_miibus_readreg), DEVMETHOD(mdio_writereg, mvneta_miibus_writereg), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc)); DRIVER_MODULE(miibus, mvneta, miibus_driver, 0, 0); DRIVER_MODULE(mdio, mvneta, mdio_driver, 0, 0); MODULE_DEPEND(mvneta, mdio, 1, 1, 1); MODULE_DEPEND(mvneta, ether, 1, 1, 1); MODULE_DEPEND(mvneta, miibus, 1, 1, 1); MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1); /* * List of MIB register and names */ enum mvneta_mib_idx { MVNETA_MIB_RX_GOOD_OCT_IDX, MVNETA_MIB_RX_BAD_OCT_IDX, MVNETA_MIB_TX_MAC_TRNS_ERR_IDX, MVNETA_MIB_RX_GOOD_FRAME_IDX, MVNETA_MIB_RX_BAD_FRAME_IDX, MVNETA_MIB_RX_BCAST_FRAME_IDX, MVNETA_MIB_RX_MCAST_FRAME_IDX, MVNETA_MIB_RX_FRAME64_OCT_IDX, MVNETA_MIB_RX_FRAME127_OCT_IDX, MVNETA_MIB_RX_FRAME255_OCT_IDX, MVNETA_MIB_RX_FRAME511_OCT_IDX, MVNETA_MIB_RX_FRAME1023_OCT_IDX, MVNETA_MIB_RX_FRAMEMAX_OCT_IDX, MVNETA_MIB_TX_GOOD_OCT_IDX, MVNETA_MIB_TX_GOOD_FRAME_IDX, MVNETA_MIB_TX_EXCES_COL_IDX, MVNETA_MIB_TX_MCAST_FRAME_IDX, MVNETA_MIB_TX_BCAST_FRAME_IDX, MVNETA_MIB_TX_MAC_CTL_ERR_IDX, MVNETA_MIB_FC_SENT_IDX, MVNETA_MIB_FC_GOOD_IDX, MVNETA_MIB_FC_BAD_IDX, MVNETA_MIB_PKT_UNDERSIZE_IDX, MVNETA_MIB_PKT_FRAGMENT_IDX, MVNETA_MIB_PKT_OVERSIZE_IDX, MVNETA_MIB_PKT_JABBER_IDX, MVNETA_MIB_MAC_RX_ERR_IDX, MVNETA_MIB_MAC_CRC_ERR_IDX, MVNETA_MIB_MAC_COL_IDX, MVNETA_MIB_MAC_LATE_COL_IDX, }; STATIC struct mvneta_mib_def { uint32_t regnum; int reg64; const char *sysctl_name; const char *desc; } mvneta_mib_list[] = { [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1, "rx_good_oct", "Good Octets Rx"}, [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0, "rx_bad_oct", "Bad Octets Rx"}, [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err", "MAC Transmit Error"}, [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0, "rx_good_frame", "Good 
Frames Rx"}, [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0, "rx_bad_frame", "Bad Frames Rx"}, [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame", "Broadcast Frames Rx"}, [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame", "Multicast Frames Rx"}, [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64", "Frame Size 1 - 64"}, [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127", "Frame Size 65 - 127"}, [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255", "Frame Size 128 - 255"}, [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511", "Frame Size 256 - 511"}, [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023", "Frame Size 512 - 1023"}, [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0, "rx_fame_1024_max", "Frame Size 1024 - Max"}, [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1, "tx_good_oct", "Good Octets Tx"}, [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0, "tx_good_frame", "Good Frames Tx"}, [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0, "tx_exces_collision", "Excessive Collision"}, [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame", "Multicast Frames Tx"}, [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame", "Broadcast Frames Tx"}, [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_ctl_err", "Unknown MAC Control"}, [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0, "fc_tx", "Flow Control Tx"}, [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0, "fc_rx_good", "Good Flow Control Rx"}, [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0, "fc_rx_bad", "Bad Flow Control Rx"}, [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0, "pkt_undersize", "Undersized Packets Rx"}, 
[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0, "pkt_fragment", "Fragmented Packets Rx"}, [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0, "pkt_oversize", "Oversized Packets Rx"}, [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0, "pkt_jabber", "Jabber Packets Rx"}, [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0, "mac_rx_err", "MAC Rx Errors"}, [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0, "mac_crc_err", "MAC CRC Errors"}, [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0, "mac_collision", "MAC Collision"}, [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0, "mac_late_collision", "MAC Late Collision"}, }; static struct resource_spec res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0} }; static struct { driver_intr_t *handler; char * description; } mvneta_intrs[] = { { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" }, }; static int mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr) { unsigned int mac_h; unsigned int mac_l; mac_l = (addr[4] << 8) | (addr[5]); mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | (addr[3] << 0); MVNETA_WRITE(sc, MVNETA_MACAL, mac_l); MVNETA_WRITE(sc, MVNETA_MACAH, mac_h); return (0); } static int mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr) { uint32_t mac_l, mac_h; #ifdef FDT if (mvneta_fdt_mac_address(sc, addr) == 0) return (0); #endif /* * Fall back -- use the currently programmed address. */ mac_l = MVNETA_READ(sc, MVNETA_MACAL); mac_h = MVNETA_READ(sc, MVNETA_MACAH); if (mac_l == 0 && mac_h == 0) { /* * Generate pseudo-random MAC. * Set lower part to random number | unit number. */ mac_l = arc4random() & ~0xff; mac_l |= device_get_unit(sc->dev) & 0xff; mac_h = arc4random(); mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */ if (bootverbose) { device_printf(sc->dev, "Could not acquire MAC address. 
" "Using randomized one.\n"); } } addr[0] = (mac_h & 0xff000000) >> 24; addr[1] = (mac_h & 0x00ff0000) >> 16; addr[2] = (mac_h & 0x0000ff00) >> 8; addr[3] = (mac_h & 0x000000ff); addr[4] = (mac_l & 0x0000ff00) >> 8; addr[5] = (mac_l & 0x000000ff); return (0); } STATIC boolean_t mvneta_has_switch(device_t self) { #ifdef FDT return (mvneta_has_switch_fdt(self)); #endif return (false); } STATIC int mvneta_dma_create(struct mvneta_softc *sc) { size_t maxsize, maxsegsz; size_t q; int error; /* * Create Tx DMA */ maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT; error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsegsz, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->tx_dtag); /* dmat */ if (error != 0) { device_printf(sc->dev, "Failed to create DMA tag for Tx descriptors.\n"); goto fail; } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MVNETA_MAX_FRAME, /* maxsize */ MVNETA_TX_SEGLIMIT, /* nsegments */ MVNETA_MAX_FRAME, /* maxsegsz */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->txmbuf_dtag); if (error != 0) { device_printf(sc->dev, "Failed to create DMA tag for Tx mbufs.\n"); goto fail; } for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { error = mvneta_ring_alloc_tx_queue(sc, q); if (error != 0) { device_printf(sc->dev, "Failed to allocate DMA safe memory for TxQ: %zu\n", q); goto fail; } } /* * Create Rx DMA. 
*/ /* Create tag for Rx descripors */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */ 1, /* nsegments */ sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->rx_dtag); /* dmat */ if (error != 0) { device_printf(sc->dev, "Failed to create DMA tag for Rx descriptors.\n"); goto fail; } /* Create tag for Rx buffers */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MVNETA_MAX_FRAME, 1, /* maxsize, nsegments */ MVNETA_MAX_FRAME, /* maxsegsz */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->rxbuf_dtag); /* dmat */ if (error != 0) { device_printf(sc->dev, "Failed to create DMA tag for Rx buffers.\n"); goto fail; } for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { if (mvneta_ring_alloc_rx_queue(sc, q) != 0) { device_printf(sc->dev, "Failed to allocate DMA safe memory for RxQ: %zu\n", q); goto fail; } } return (0); fail: mvneta_detach(sc->dev); return (error); } /* ARGSUSED */ int mvneta_attach(device_t self) { struct mvneta_softc *sc; if_t ifp; device_t child; int ifm_target; int q, error; #if !defined(__aarch64__) uint32_t reg; #endif clk_t clk; sc = device_get_softc(self); sc->dev = self; mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF); error = bus_alloc_resources(self, res_spec, sc->res); if (error) { device_printf(self, "could not allocate resources\n"); return (ENXIO); } sc->version = MVNETA_READ(sc, MVNETA_PV); device_printf(self, "version is %x\n", sc->version); callout_init(&sc->tick_ch, 0); /* * make sure DMA engines are in reset state */ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001); 
MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001); error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0, &clk); if (error != 0) { #if defined(__aarch64__) device_printf(sc->dev, "Cannot get clock, using default frequency: %d\n", A3700_TCLK_250MHZ); sc->clk_freq = A3700_TCLK_250MHZ; #else device_printf(sc->dev, "Cannot get clock, using get_tclk()\n"); sc->clk_freq = get_tclk(); #endif } else { error = clk_get_freq(clk, &sc->clk_freq); if (error != 0) { device_printf(sc->dev, "Cannot obtain frequency from parent clock\n"); bus_release_resources(sc->dev, res_spec, sc->res); return (error); } } #if !defined(__aarch64__) /* * Disable port snoop for buffers and descriptors * to avoid L2 caching of both without DRAM copy. * Obtain coherency settings from the first MBUS * window attribute. */ if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) { reg = MVNETA_READ(sc, MVNETA_PSNPCFG); reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK; reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK; MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg); } #endif error = bus_setup_intr(self, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc, &sc->ih_cookie[0]); if (error) { device_printf(self, "could not setup %s\n", mvneta_intrs[0].description); mvneta_detach(self); return (error); } /* * MAC address */ if (mvneta_get_mac_address(sc, sc->enaddr)) { device_printf(self, "no mac address.\n"); return (ENXIO); } mvneta_set_mac_address(sc, sc->enaddr); mvneta_disable_intr(sc); /* Allocate network interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(self, "if_alloc() failed\n"); mvneta_detach(self); return (ENOMEM); } if_initname(ifp, device_get_name(self), device_get_unit(self)); /* * We can support 802.1Q VLAN-sized frames and jumbo * Ethernet frames. 
*/ if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); #ifdef MVNETA_MULTIQUEUE if_settransmitfn(ifp, mvneta_transmit); if_setqflushfn(ifp, mvneta_qflush); #else /* !MVNETA_MULTIQUEUE */ if_setstartfn(ifp, mvneta_start); if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1); if_setsendqready(ifp); #endif if_setinitfn(ifp, mvneta_init); if_setioctlfn(ifp, mvneta_ioctl); /* * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware. */ if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); /* * As VLAN hardware tagging is not supported * but is necessary to perform VLAN hardware checksums, * it is done in the driver */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0); /* * Currently IPv6 HW checksum is broken, so make sure it is disabled. */ if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM_IPV6); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Disabled option(s): * - Support for Large Receive Offload */ if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP); sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */ /* * Device DMA Buffer allocation. * Handles resource deallocation in case of failure. */ error = mvneta_dma_create(sc); if (error != 0) { mvneta_detach(self); return (error); } /* Initialize queues */ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { error = mvneta_ring_init_tx_queue(sc, q); if (error != 0) { mvneta_detach(self); return (error); } } for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { error = mvneta_ring_init_rx_queue(sc, q); if (error != 0) { mvneta_detach(self); return (error); } } /* * Enable DMA engines and Initialize Device Registers. 
*/ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); mvneta_sc_lock(sc); mvneta_filter_setup(sc); mvneta_sc_unlock(sc); mvneta_initreg(ifp); /* * Now MAC is working, setup MII. */ if (mii_init == 0) { /* * MII bus is shared by all MACs and all PHYs in SoC. * serializing the bus access should be safe. */ mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF); mii_init = 1; } /* Attach PHY(s) */ if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) { error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange, mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(self, "MII attach failed, error: %d\n", error); ether_ifdetach(sc->ifp); mvneta_detach(self); return (error); } sc->mii = device_get_softc(sc->miibus); sc->phy_attached = 1; /* Disable auto-negotiation in MAC - rely on PHY layer */ mvneta_update_autoneg(sc, FALSE); } else if (sc->use_inband_status == TRUE) { /* In-band link status */ ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, mvneta_mediastatus); /* Configure media */ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO); /* Enable auto-negotiation */ mvneta_update_autoneg(sc, TRUE); mvneta_sc_lock(sc); if (MVNETA_IS_LINKUP(sc)) mvneta_linkup(sc); else mvneta_linkdown(sc); mvneta_sc_unlock(sc); } else { /* Fixed-link, use predefined values */ mvneta_update_autoneg(sc, FALSE); ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, mvneta_mediastatus); ifm_target = 
IFM_ETHER; switch (sc->phy_speed) { case 2500: if (sc->phy_mode != MVNETA_PHY_SGMII && sc->phy_mode != MVNETA_PHY_QSGMII) { device_printf(self, "2.5G speed can work only in (Q)SGMII mode\n"); ether_ifdetach(sc->ifp); mvneta_detach(self); return (ENXIO); } ifm_target |= IFM_2500_T; break; case 1000: ifm_target |= IFM_1000_T; break; case 100: ifm_target |= IFM_100_TX; break; case 10: ifm_target |= IFM_10_T; break; default: ether_ifdetach(sc->ifp); mvneta_detach(self); return (ENXIO); } if (sc->phy_fdx) ifm_target |= IFM_FDX; else ifm_target |= IFM_HDX; ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL); ifmedia_set(&sc->mvneta_ifmedia, ifm_target); if_link_state_change(sc->ifp, LINK_STATE_UP); if (mvneta_has_switch(self)) { if (bootverbose) device_printf(self, "This device is attached to a switch\n"); child = device_add_child(sc->dev, "mdio", -1); if (child == NULL) { ether_ifdetach(sc->ifp); mvneta_detach(self); return (ENXIO); } bus_generic_attach(sc->dev); bus_generic_attach(child); } /* Configure MAC media */ mvneta_update_media(sc, ifm_target); } ether_ifattach(ifp, sc->enaddr); callout_reset(&sc->tick_ch, 0, mvneta_tick, sc); sysctl_mvneta_init(sc); return (0); } STATIC int mvneta_detach(device_t dev) { struct mvneta_softc *sc; int q; sc = device_get_softc(dev); if (device_is_attached(dev)) { mvneta_stop(sc); callout_drain(&sc->tick_ch); ether_ifdetach(sc->ifp); } for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) mvneta_ring_dealloc_rx_queue(sc, q); for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) mvneta_ring_dealloc_tx_queue(sc, q); device_delete_children(dev); if (sc->ih_cookie[0] != NULL) bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]); if (sc->tx_dtag != NULL) bus_dma_tag_destroy(sc->tx_dtag); if (sc->rx_dtag != NULL) bus_dma_tag_destroy(sc->rx_dtag); if (sc->txmbuf_dtag != NULL) bus_dma_tag_destroy(sc->txmbuf_dtag); if (sc->rxbuf_dtag != NULL) bus_dma_tag_destroy(sc->rxbuf_dtag); bus_release_resources(dev, res_spec, sc->res); if (sc->ifp) if_free(sc->ifp); if 
(mtx_initialized(&sc->mtx)) mtx_destroy(&sc->mtx); return (0); } /* * MII */ STATIC int mvneta_miibus_readreg(device_t dev, int phy, int reg) { struct mvneta_softc *sc; if_t ifp; uint32_t smi, val; int i; sc = device_get_softc(dev); ifp = sc->ifp; mtx_lock(&mii_mutex); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) break; DELAY(1); } if (i == MVNETA_PHY_TIMEOUT) { if_printf(ifp, "SMI busy timeout\n"); mtx_unlock(&mii_mutex); return (-1); } smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ; MVNETA_WRITE(sc, MVNETA_SMI, smi); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) break; DELAY(1); } if (i == MVNETA_PHY_TIMEOUT) { if_printf(ifp, "SMI busy timeout\n"); mtx_unlock(&mii_mutex); return (-1); } for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { smi = MVNETA_READ(sc, MVNETA_SMI); if (smi & MVNETA_SMI_READVALID) break; DELAY(1); } if (i == MVNETA_PHY_TIMEOUT) { if_printf(ifp, "SMI busy timeout\n"); mtx_unlock(&mii_mutex); return (-1); } mtx_unlock(&mii_mutex); #ifdef MVNETA_KTR CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", if_getname(ifp), i, MVNETA_PHY_TIMEOUT); #endif val = smi & MVNETA_SMI_DATA_MASK; #ifdef MVNETA_KTR CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_getname(ifp), phy, reg, val); #endif return (val); } STATIC int mvneta_miibus_writereg(device_t dev, int phy, int reg, int val) { struct mvneta_softc *sc; if_t ifp; uint32_t smi; int i; sc = device_get_softc(dev); ifp = sc->ifp; #ifdef MVNETA_KTR CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp), phy, reg, val); #endif mtx_lock(&mii_mutex); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) break; DELAY(1); } if (i == MVNETA_PHY_TIMEOUT) { if_printf(ifp, "SMI busy timeout\n"); mtx_unlock(&mii_mutex); return (0); } smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_WRITE | (val & 
MVNETA_SMI_DATA_MASK); MVNETA_WRITE(sc, MVNETA_SMI, smi); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) break; DELAY(1); } mtx_unlock(&mii_mutex); if (i == MVNETA_PHY_TIMEOUT) if_printf(ifp, "phy write timed out\n"); return (0); } STATIC void mvneta_portup(struct mvneta_softc *sc) { int q; for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { mvneta_rx_lockq(sc, q); mvneta_rx_queue_enable(sc->ifp, q); mvneta_rx_unlockq(sc, q); } for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { mvneta_tx_lockq(sc, q); mvneta_tx_queue_enable(sc->ifp, q); mvneta_tx_unlockq(sc, q); } } STATIC void mvneta_portdown(struct mvneta_softc *sc) { struct mvneta_rx_ring *rx; struct mvneta_tx_ring *tx; int q, cnt; uint32_t reg; for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { rx = MVNETA_RX_RING(sc, q); mvneta_rx_lockq(sc, q); rx->queue_status = MVNETA_QUEUE_DISABLED; mvneta_rx_unlockq(sc, q); } for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { tx = MVNETA_TX_RING(sc, q); mvneta_tx_lockq(sc, q); tx->queue_status = MVNETA_QUEUE_DISABLED; mvneta_tx_unlockq(sc, q); } /* Wait for all Rx activity to terminate. */ reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK; reg = MVNETA_RQC_DIS(reg); MVNETA_WRITE(sc, MVNETA_RQC, reg); cnt = 0; do { if (cnt >= RX_DISABLE_TIMEOUT) { if_printf(sc->ifp, "timeout for RX stopped. rqc 0x%x\n", reg); break; } cnt++; reg = MVNETA_READ(sc, MVNETA_RQC); } while ((reg & MVNETA_RQC_EN_MASK) != 0); /* Wait for all Tx activity to terminate. */ reg = MVNETA_READ(sc, MVNETA_PIE); reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK; MVNETA_WRITE(sc, MVNETA_PIE, reg); reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK; MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK; reg = MVNETA_TQC_DIS(reg); MVNETA_WRITE(sc, MVNETA_TQC, reg); cnt = 0; do { if (cnt >= TX_DISABLE_TIMEOUT) { if_printf(sc->ifp, "timeout for TX stopped. 
tqc 0x%x\n", reg); break; } cnt++; reg = MVNETA_READ(sc, MVNETA_TQC); } while ((reg & MVNETA_TQC_EN_MASK) != 0); /* Wait for all Tx FIFO is empty */ cnt = 0; do { if (cnt >= TX_FIFO_EMPTY_TIMEOUT) { if_printf(sc->ifp, "timeout for TX FIFO drained. ps0 0x%x\n", reg); break; } cnt++; reg = MVNETA_READ(sc, MVNETA_PS0); } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) && ((reg & MVNETA_PS0_TXINPROG) != 0)); } /* * Device Register Initialization * reset device registers to device driver default value. * the device is not enabled here. */ STATIC int mvneta_initreg(if_t ifp) { struct mvneta_softc *sc; int q; uint32_t reg; sc = if_getsoftc(ifp); #ifdef MVNETA_KTR CTR1(KTR_SPARE2, "%s initializing device register", if_name(ifp)); #endif /* Disable Legacy WRR, Disable EJP, Release from reset. */ MVNETA_WRITE(sc, MVNETA_TQC_1, 0); /* Enable mbus retry. */ MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN); /* Init TX/RX Queue Registers */ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { mvneta_rx_lockq(sc, q); if (mvneta_rx_queue_init(ifp, q) != 0) { device_printf(sc->dev, "initialization failed: cannot initialize queue\n"); mvneta_rx_unlockq(sc, q); return (ENOBUFS); } mvneta_rx_unlockq(sc, q); } for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { mvneta_tx_lockq(sc, q); if (mvneta_tx_queue_init(ifp, q) != 0) { device_printf(sc->dev, "initialization failed: cannot initialize queue\n"); mvneta_tx_unlockq(sc, q); return (ENOBUFS); } mvneta_tx_unlockq(sc, q); } /* * Ethernet Unit Control - disable automatic PHY management by HW. * In case the port uses SMI-controlled PHY, poll its status with * mii_tick() and update MAC settings accordingly. 
*/ reg = MVNETA_READ(sc, MVNETA_EUC); reg &= ~MVNETA_EUC_POLLING; MVNETA_WRITE(sc, MVNETA_EUC, reg); /* EEE: Low Power Idle */ reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI); reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS); MVNETA_WRITE(sc, MVNETA_LPIC0, reg); reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW); MVNETA_WRITE(sc, MVNETA_LPIC1, reg); reg = MVNETA_LPIC2_MUSTSET; MVNETA_WRITE(sc, MVNETA_LPIC2, reg); /* Port MAC Control set 0 */ reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */ reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */ reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE); MVNETA_WRITE(sc, MVNETA_PMACC0, reg); /* Port MAC Control set 2 */ reg = MVNETA_READ(sc, MVNETA_PMACC2); switch (sc->phy_mode) { case MVNETA_PHY_QSGMII: reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII); break; case MVNETA_PHY_SGMII: reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII); break; case MVNETA_PHY_RGMII: case MVNETA_PHY_RGMII_ID: reg |= MVNETA_PMACC2_RGMIIEN; break; } reg |= MVNETA_PMACC2_MUSTSET; reg &= ~MVNETA_PMACC2_PORTMACRESET; MVNETA_WRITE(sc, MVNETA_PMACC2, reg); /* Port Configuration Extended: enable Tx CRC generation */ reg = MVNETA_READ(sc, MVNETA_PXCX); reg &= ~MVNETA_PXCX_TXCRCDIS; MVNETA_WRITE(sc, MVNETA_PXCX, reg); /* clear MIB counter registers(clear by read) */ mvneta_sc_lock(sc); mvneta_clear_mib(sc); mvneta_sc_unlock(sc); /* Set SDC register except IPGINT bits */ reg = MVNETA_SDC_RXBSZ_16_64BITWORDS; reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS; reg |= MVNETA_SDC_BLMR; reg |= MVNETA_SDC_BLMT; MVNETA_WRITE(sc, MVNETA_SDC, reg); return (0); } STATIC void mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs->ds_addr; } STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q) { struct mvneta_rx_ring *rx; struct mvneta_buf *rxbuf; 
	bus_dmamap_t dmap;
	int i, error;

	/* Reject queue indices beyond what the hardware supports. */
	if (q >= MVNETA_RX_QNUM_MAX)
		return (EINVAL);

	rx = MVNETA_RX_RING(sc, q);
	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
	/* Allocate DMA memory for Rx descriptors */
	error = bus_dmamem_alloc(sc->rx_dtag, (void**)&(rx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &rx->desc_map);
	if (error != 0 || rx->desc == NULL)
		goto fail;
	/* Load the ring; its bus address lands in rx->desc_pa via the cb. */
	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
	    rx->desc,
	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	/* Pre-create one DMA map per ring slot for later mbuf loads. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
		if (error != 0) {
			device_printf(sc->dev,
			    "Failed to create DMA map for Rx buffer num: %d\n",
			    i);
			goto fail;
		}
		rxbuf = &rx->rxbuf[i];
		rxbuf->dmap = dmap;
		rxbuf->m = NULL;	/* no mbuf attached yet */
	}
	return (0);
fail:
	/* Unwind any partial setup; flushing requires the ring lock. */
	mvneta_rx_lockq(sc, q);
	mvneta_ring_flush_rx_queue(sc, q);
	mvneta_rx_unlockq(sc, q);
	mvneta_ring_dealloc_rx_queue(sc, q);
	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
	return (error);
}

/*
 * Allocate the Tx descriptor ring for queue q and, in multi-queue
 * builds, its software buf_ring.  Returns 0 or an errno; on failure
 * all partially allocated resources are released.
 */
STATIC int
mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	int error;

	if (q >= MVNETA_TX_QNUM_MAX)
		return (EINVAL);
	tx = MVNETA_TX_RING(sc, q);
	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
	error = bus_dmamem_alloc(sc->tx_dtag, (void**)&(tx->desc),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &tx->desc_map);
	if (error != 0 || tx->desc == NULL)
		goto fail;
	/* Load the ring; its bus address lands in tx->desc_pa via the cb. */
	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
	    tx->desc,
	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
#ifdef MVNETA_MULTIQUEUE
	/* Software staging ring used by if_transmit before HW enqueue. */
	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
	    &tx->ring_mtx);
	if (tx->br == NULL) {
		device_printf(sc->dev,
		    "Could not setup buffer ring for TxQ(%d)\n", q);
		error = ENOMEM;
		goto fail;
	}
#endif
	return (0);
fail:
	/* Unwind any partial setup; flushing requires the ring lock. */
	mvneta_tx_lockq(sc, q);
	mvneta_ring_flush_tx_queue(sc, q);
	mvneta_tx_unlockq(sc, q);
	mvneta_ring_dealloc_tx_queue(sc, q);
device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); return (error); } STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q) { struct mvneta_tx_ring *tx; struct mvneta_buf *txbuf; void *kva; int error; int i; if (q >= MVNETA_TX_QNUM_MAX) return; tx = MVNETA_TX_RING(sc, q); if (tx->taskq != NULL) { /* Remove task */ while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0) taskqueue_drain(tx->taskq, &tx->task); } #ifdef MVNETA_MULTIQUEUE if (tx->br != NULL) drbr_free(tx->br, M_DEVBUF); #endif if (sc->txmbuf_dtag != NULL) { for (i = 0; i < MVNETA_TX_RING_CNT; i++) { txbuf = &tx->txbuf[i]; if (txbuf->dmap != NULL) { error = bus_dmamap_destroy(sc->txmbuf_dtag, txbuf->dmap); if (error != 0) { panic("%s: map busy for Tx descriptor (Q%d, %d)", __func__, q, i); } } } } if (tx->desc_pa != 0) bus_dmamap_unload(sc->tx_dtag, tx->desc_map); kva = (void *)tx->desc; if (kva != NULL) bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map); if (mtx_name(&tx->ring_mtx) != NULL) mtx_destroy(&tx->ring_mtx); memset(tx, 0, sizeof(*tx)); } STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q) { struct mvneta_rx_ring *rx; struct lro_ctrl *lro; void *kva; if (q >= MVNETA_RX_QNUM_MAX) return; rx = MVNETA_RX_RING(sc, q); if (rx->desc_pa != 0) bus_dmamap_unload(sc->rx_dtag, rx->desc_map); kva = (void *)rx->desc; if (kva != NULL) bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map); lro = &rx->lro; tcp_lro_free(lro); if (mtx_name(&rx->ring_mtx) != NULL) mtx_destroy(&rx->ring_mtx); memset(rx, 0, sizeof(*rx)); } STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q) { struct mvneta_rx_ring *rx; struct lro_ctrl *lro; int error; if (q >= MVNETA_RX_QNUM_MAX) return (0); rx = MVNETA_RX_RING(sc, q); rx->dma = rx->cpu = 0; rx->queue_th_received = MVNETA_RXTH_COUNT; rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */ /* Initialize LRO */ rx->lro_enabled = FALSE; if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) { lro = &rx->lro; 
error = tcp_lro_init(lro); if (error != 0) device_printf(sc->dev, "LRO Initialization failed!\n"); else { rx->lro_enabled = TRUE; lro->ifp = sc->ifp; } } return (0); } STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q) { struct mvneta_tx_ring *tx; struct mvneta_buf *txbuf; int i, error; if (q >= MVNETA_TX_QNUM_MAX) return (0); tx = MVNETA_TX_RING(sc, q); /* Tx handle */ for (i = 0; i < MVNETA_TX_RING_CNT; i++) { txbuf = &tx->txbuf[i]; txbuf->m = NULL; /* Tx handle needs DMA map for busdma_load_mbuf() */ error = bus_dmamap_create(sc->txmbuf_dtag, 0, &txbuf->dmap); if (error != 0) { device_printf(sc->dev, "can't create dma map (tx ring %d)\n", i); return (error); } } tx->dma = tx->cpu = 0; tx->used = 0; tx->drv_error = 0; tx->queue_status = MVNETA_QUEUE_DISABLED; tx->queue_hung = FALSE; tx->ifp = sc->ifp; tx->qidx = q; TASK_INIT(&tx->task, 0, mvneta_tx_task, tx); tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK, taskqueue_thread_enqueue, &tx->taskq); taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)", device_get_nameunit(sc->dev), q); return (0); } STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q) { struct mvneta_tx_ring *tx; struct mvneta_buf *txbuf; int i; tx = MVNETA_TX_RING(sc, q); KASSERT_TX_MTX(sc, q); /* Tx handle */ for (i = 0; i < MVNETA_TX_RING_CNT; i++) { txbuf = &tx->txbuf[i]; bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); if (txbuf->m != NULL) { m_freem(txbuf->m); txbuf->m = NULL; } } tx->dma = tx->cpu = 0; tx->used = 0; } STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q) { struct mvneta_rx_ring *rx; struct mvneta_buf *rxbuf; int i; rx = MVNETA_RX_RING(sc, q); KASSERT_RX_MTX(sc, q); /* Rx handle */ for (i = 0; i < MVNETA_RX_RING_CNT; i++) { rxbuf = &rx->rxbuf[i]; mvneta_rx_buf_free(sc, rxbuf); } rx->dma = rx->cpu = 0; } /* * Rx/Tx Queue Control */ STATIC int mvneta_rx_queue_init(if_t ifp, int q) { struct mvneta_softc *sc; struct mvneta_rx_ring *rx; uint32_t reg; sc 
	= if_getsoftc(ifp);
	KASSERT_RX_MTX(sc, q);
	rx = MVNETA_RX_RING(sc, q);
	DASSERT(rx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);

	/* Rx buffer size and descriptor ring size */
	reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));
#endif
	/* Rx packet offset address */
	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
	    MVNETA_READ(sc, MVNETA_PRXC(q)));
#endif
	/* if DMA is not working, register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
	return (0);
}

/*
 * Program the Tx descriptor ring address and size for queue q.
 * Caller must hold the Tx ring lock (asserted below).
 */
STATIC int
mvneta_tx_queue_init(if_t ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	uint32_t reg;

	sc = if_getsoftc(ifp);
	KASSERT_TX_MTX(sc, q);
	tx = MVNETA_TX_RING(sc, q);
	DASSERT(tx->desc_pa != 0);

	/* descriptor address */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);

	/* descriptor ring size */
	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);

	/* if DMA is not working, register is not updated */
	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
	return (0);
}

/*
 * Enable Rx queue q: program the interrupt coalescing thresholds
 * (packet count and time), unmask its interrupt and set the queue
 * enable bit.  Caller must hold the Rx ring lock (asserted below).
 */
STATIC int
mvneta_rx_queue_enable(if_t ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_rx_ring *rx;
	uint32_t reg;

	sc = if_getsoftc(ifp);
	rx = MVNETA_RX_RING(sc, q);
	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);

	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr.
	 */
	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
	reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
	reg |= MVNETA_RQC_ENQ(q);
	MVNETA_WRITE(sc, MVNETA_RQC, reg);

	rx->queue_status = MVNETA_QUEUE_WORKING;
	return (0);
}

/*
 * Enable Tx queue q and mark it idle (nothing queued yet).
 * Caller must hold the Tx ring lock (asserted below).
 */
STATIC int
mvneta_tx_queue_enable(if_t ifp, int q)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;

	sc = if_getsoftc(ifp);
	tx = MVNETA_TX_RING(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Enable Tx queue */
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));

	tx->queue_status = MVNETA_QUEUE_IDLE;
	tx->queue_hung = FALSE;
	return (0);
}

/* Per-queue Rx ring lock helpers. */
STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_lock(&sc->rx_ring[q].ring_mtx);
}

STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_RX_QNUM_MAX);
	mtx_unlock(&sc->rx_ring[q].ring_mtx);
}

/* Per-queue Tx ring lock helpers; trylock returns non-zero on success. */
STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
}

STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_lock(&sc->tx_ring[q].ring_mtx);
}

STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
{

	DASSERT(q >= 0);
	DASSERT(q < MVNETA_TX_QNUM_MAX);
	mtx_unlock(&sc->tx_ring[q].ring_mtx);
}

/*
 * Interrupt Handlers
 */

/* Mask and acknowledge every interrupt source of the port. */
STATIC void
mvneta_disable_intr(struct mvneta_softc *sc)
{

	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
	MVNETA_WRITE(sc, MVNETA_PIE, 0);
}

STATIC void
mvneta_enable_intr(struct mvneta_softc *sc)
{
	uint32_t reg;

	/* Enable Summary Bit
to check all interrupt cause. */ reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); reg |= MVNETA_PRXTXTI_PMISCICSUMMARY; MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); if (!sc->phy_attached || sc->use_inband_status) { /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */ MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG | MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE); } /* Enable All Queue Interrupt */ reg = MVNETA_READ(sc, MVNETA_PIE); reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK; reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK; MVNETA_WRITE(sc, MVNETA_PIE, reg); } STATIC void mvneta_rxtxth_intr(void *arg) { struct mvneta_softc *sc; if_t ifp; uint32_t ic, queues; sc = arg; ifp = sc->ifp; #ifdef MVNETA_KTR CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", if_name(ifp)); #endif ic = MVNETA_READ(sc, MVNETA_PRXTXTIC); if (ic == 0) return; MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic); /* Ack maintenance interrupt first */ if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) && (!sc->phy_attached || sc->use_inband_status))) { mvneta_sc_lock(sc); mvneta_misc_intr(sc); mvneta_sc_unlock(sc); } if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) return; /* RxTxTH interrupt */ queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic); if (__predict_true(queues)) { #ifdef MVNETA_KTR CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", if_name(ifp)); #endif /* At the moment the driver support only one RX queue. 
	 */
	DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
		mvneta_rx(sc, 0, 0);
	}
}

/*
 * Drain and handle port-miscellaneous interrupt causes (PMIC masked by
 * PMIM).  Link/PHY/sync-change events are forwarded to mvneta_link_isr().
 * Returns non-zero if at least one cause was claimed.  Caller must hold
 * the softc lock (asserted below).
 */
STATIC int
mvneta_misc_intr(struct mvneta_softc *sc)
{
	uint32_t ic;
	int claimed = 0;

#ifdef MVNETA_KTR
	CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
#endif
	KASSERT_SC_MTX(sc);
	for (;;) {
		/* Only consider causes that are currently unmasked. */
		ic = MVNETA_READ(sc, MVNETA_PMIC);
		ic &= MVNETA_READ(sc, MVNETA_PMIM);
		if (ic == 0)
			break;
		/* Ack the causes we are about to handle (write-0-to-clear). */
		MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
		claimed = 1;

		if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
			mvneta_link_isr(sc);
	}
	return (claimed);
}

/*
 * Periodic (1 Hz) housekeeping callout: Tx drain, MIB harvest, PHY
 * polling via mii_tick(), Rx refill retry, and Tx watchdog check.
 */
STATIC void
mvneta_tick(void *arg)
{
	struct mvneta_softc *sc;
	struct mvneta_tx_ring *tx;
	struct mvneta_rx_ring *rx;
	int q;
	uint32_t fc_prev, fc_curr;

	sc = arg;

	/*
	 * This is done before mib update to get the right stats
	 * for this tick.
	 */
	mvneta_tx_drain(sc);

	/* Extract previous flow-control frame received counter. */
	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
	/* Read mib registers (clear by read). */
	mvneta_update_mib(sc);
	/* Extract current flow-control frame received counter. */
	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;

	if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
		mvneta_sc_lock(sc);
		mii_tick(sc->mii);

		/* Adjust MAC settings */
		mvneta_adjust_link(sc);
		mvneta_sc_unlock(sc);
	}

	/*
	 * We were unable to refill the rx queue and left the rx func, leaving
	 * the ring without mbuf and no way to call the refill func.
	 */
	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rx = MVNETA_RX_RING(sc, q);
		if (rx->needs_refill == TRUE) {
			mvneta_rx_lockq(sc, q);
			mvneta_rx_queue_refill(sc, q);
			mvneta_rx_unlockq(sc, q);
		}
	}

	/*
	 * Watchdog:
	 * - check if queue is mark as hung.
	 * - ignore hung status if we received some pause frame
	 *   as hardware may have paused packet transmit.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		/*
		 * We should take queue lock, but as we only read
		 * queue status we can do it without lock, we may
		 * only missdetect queue status for one tick.
*/ tx = MVNETA_TX_RING(sc, q); if (tx->queue_hung && (fc_curr - fc_prev) == 0) goto timeout; } callout_schedule(&sc->tick_ch, hz); return; timeout: if_printf(sc->ifp, "watchdog timeout\n"); mvneta_sc_lock(sc); sc->counter_watchdog++; sc->counter_watchdog_mib++; /* Trigger reinitialize sequence. */ mvneta_stop_locked(sc); mvneta_init_locked(sc); mvneta_sc_unlock(sc); } STATIC void mvneta_qflush(if_t ifp) { #ifdef MVNETA_MULTIQUEUE struct mvneta_softc *sc; struct mvneta_tx_ring *tx; struct mbuf *m; size_t q; sc = if_getsoftc(ifp); for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { tx = MVNETA_TX_RING(sc, q); mvneta_tx_lockq(sc, q); while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) m_freem(m); mvneta_tx_unlockq(sc, q); } #endif if_qflush(ifp); } STATIC void mvneta_tx_task(void *arg, int pending) { struct mvneta_softc *sc; struct mvneta_tx_ring *tx; if_t ifp; int error; tx = arg; ifp = tx->ifp; sc = if_getsoftc(ifp); mvneta_tx_lockq(sc, tx->qidx); error = mvneta_xmit_locked(sc, tx->qidx); mvneta_tx_unlockq(sc, tx->qidx); /* Try again */ if (__predict_false(error != 0 && error != ENETDOWN)) { pause("mvneta_tx_task_sleep", 1); taskqueue_enqueue(tx->taskq, &tx->task); } } STATIC int mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m) { struct mvneta_tx_ring *tx; if_t ifp; int error; KASSERT_TX_MTX(sc, q); tx = MVNETA_TX_RING(sc, q); error = 0; ifp = sc->ifp; /* Dont enqueue packet if the queue is disabled. */ if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) { m_freem(*m); *m = NULL; return (ENETDOWN); } /* Reclaim mbuf if above threshold. */ if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT)) mvneta_tx_queue_complete(sc, q); /* Do not call transmit path if queue is already too full. 
*/ if (__predict_false(tx->used > MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT)) return (ENOBUFS); error = mvneta_tx_queue(sc, m, q); if (__predict_false(error != 0)) return (error); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, *m); /* Set watchdog on */ tx->watchdog_time = ticks; tx->queue_status = MVNETA_QUEUE_WORKING; return (error); } #ifdef MVNETA_MULTIQUEUE STATIC int mvneta_transmit(if_t ifp, struct mbuf *m) { struct mvneta_softc *sc; struct mvneta_tx_ring *tx; int error; int q; sc = if_getsoftc(ifp); /* Use default queue if there is no flow id as thread can migrate. */ if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)) q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX; else q = 0; tx = MVNETA_TX_RING(sc, q); /* If buf_ring is full start transmit immediately. */ if (buf_ring_full(tx->br)) { mvneta_tx_lockq(sc, q); mvneta_xmit_locked(sc, q); mvneta_tx_unlockq(sc, q); } /* * If the buf_ring is empty we will not reorder packets. * If the lock is available transmit without using buf_ring. */ if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) { error = mvneta_xmitfast_locked(sc, q, &m); mvneta_tx_unlockq(sc, q); if (__predict_true(error == 0)) return (0); /* Transmit can fail in fastpath. */ if (__predict_false(m == NULL)) return (error); } /* Enqueue then schedule taskqueue. 
*/ error = drbr_enqueue(ifp, tx->br, m); if (__predict_false(error != 0)) return (error); taskqueue_enqueue(tx->taskq, &tx->task); return (0); } STATIC int mvneta_xmit_locked(struct mvneta_softc *sc, int q) { if_t ifp; struct mvneta_tx_ring *tx; struct mbuf *m; int error; KASSERT_TX_MTX(sc, q); ifp = sc->ifp; tx = MVNETA_TX_RING(sc, q); error = 0; while ((m = drbr_peek(ifp, tx->br)) != NULL) { error = mvneta_xmitfast_locked(sc, q, &m); if (__predict_false(error != 0)) { if (m != NULL) drbr_putback(ifp, tx->br, m); else drbr_advance(ifp, tx->br); break; } drbr_advance(ifp, tx->br); } return (error); } #else /* !MVNETA_MULTIQUEUE */ STATIC void mvneta_start(if_t ifp) { struct mvneta_softc *sc; struct mvneta_tx_ring *tx; int error; sc = if_getsoftc(ifp); tx = MVNETA_TX_RING(sc, 0); mvneta_tx_lockq(sc, 0); error = mvneta_xmit_locked(sc, 0); mvneta_tx_unlockq(sc, 0); /* Handle retransmit in the background taskq. */ if (__predict_false(error != 0 && error != ENETDOWN)) taskqueue_enqueue(tx->taskq, &tx->task); } STATIC int mvneta_xmit_locked(struct mvneta_softc *sc, int q) { if_t ifp; struct mbuf *m; int error; KASSERT_TX_MTX(sc, q); ifp = sc->ifp; error = 0; while (!if_sendq_empty(ifp)) { m = if_dequeue(ifp); if (m == NULL) break; error = mvneta_xmitfast_locked(sc, q, &m); if (__predict_false(error != 0)) { if (m != NULL) if_sendq_prepend(ifp, m); break; } } return (error); } #endif STATIC int mvneta_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct mvneta_softc *sc; struct mvneta_rx_ring *rx; struct ifreq *ifr; int error, mask; uint32_t flags; bool reinit; int q; error = 0; reinit = false; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (cmd) { case SIOCSIFFLAGS: mvneta_sc_lock(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->mvneta_if_flags; if (flags != 0) sc->mvneta_if_flags = if_getflags(ifp); if ((flags & IFF_PROMISC) != 0) mvneta_filter_setup(sc); } else { mvneta_init_locked(sc); 
sc->mvneta_if_flags = if_getflags(ifp); if (sc->phy_attached) mii_mediachg(sc->mii); mvneta_sc_unlock(sc); break; } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) mvneta_stop_locked(sc); sc->mvneta_if_flags = if_getflags(ifp); mvneta_sc_unlock(sc); break; case SIOCSIFCAP: if (if_getmtu(ifp) > sc->tx_csum_limit && ifr->ifr_reqcap & IFCAP_TXCSUM) ifr->ifr_reqcap &= ~IFCAP_TXCSUM; mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (mask & IFCAP_HWCSUM) { if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, IFCAP_HWCSUM); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP); else if_sethwassist(ifp, 0); } if (mask & IFCAP_LRO) { mvneta_sc_lock(sc); if_togglecapenable(ifp, IFCAP_LRO); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { rx = MVNETA_RX_RING(sc, q); rx->lro_enabled = !rx->lro_enabled; } } mvneta_sc_unlock(sc); } VLAN_CAPABILITIES(ifp); break; case SIOCSIFMEDIA: if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T || IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) && (ifr->ifr_media & IFM_FDX) == 0) { device_printf(sc->dev, "%s half-duplex unsupported\n", IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ? 
"1000Base-T" : "2500Base-T"); error = EINVAL; break; } case SIOCGIFMEDIA: /* FALLTHROUGH */ case SIOCGIFXMEDIA: if (!sc->phy_attached) error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia, cmd); else error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, cmd); break; case SIOCSIFMTU: if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME - MVNETA_ETHER_SIZE) { error = EINVAL; } else { if_setmtu(ifp, ifr->ifr_mtu); mvneta_sc_lock(sc); if (if_getmtu(ifp) + MVNETA_ETHER_SIZE <= MCLBYTES) { sc->rx_frame_size = MCLBYTES; } else { sc->rx_frame_size = MJUM9BYTES; } if (if_getmtu(ifp) > sc->tx_csum_limit) { if_setcapenablebit(ifp, 0, IFCAP_TXCSUM); if_sethwassist(ifp, 0); } else { if_setcapenablebit(ifp, IFCAP_TXCSUM, 0); if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP); } /* * Reinitialize RX queues. * We need to update RX descriptor size. */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { reinit = true; mvneta_stop_locked(sc); } for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { mvneta_rx_lockq(sc, q); if (mvneta_rx_queue_init(ifp, q) != 0) { device_printf(sc->dev, "initialization failed:" " cannot initialize queue\n"); mvneta_rx_unlockq(sc, q); error = ENOBUFS; break; } mvneta_rx_unlockq(sc, q); } if (reinit) mvneta_init_locked(sc); mvneta_sc_unlock(sc); } break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } STATIC void mvneta_init_locked(void *arg) { struct mvneta_softc *sc; if_t ifp; uint32_t reg; int q, cpu; sc = arg; ifp = sc->ifp; if (!device_is_attached(sc->dev) || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; mvneta_disable_intr(sc); callout_stop(&sc->tick_ch); /* Get the latest mac address */ bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN); mvneta_set_mac_address(sc, sc->enaddr); mvneta_filter_setup(sc); /* Start DMA Engine */ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); /* Enable port */ reg = MVNETA_READ(sc, 
MVNETA_PMACC0); reg |= MVNETA_PMACC0_PORTEN; reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK; reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE); MVNETA_WRITE(sc, MVNETA_PMACC0, reg); /* Allow access to each TXQ/RXQ from both CPU's */ for (cpu = 0; cpu < mp_ncpus; ++cpu) MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu), MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK); for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { mvneta_rx_lockq(sc, q); mvneta_rx_queue_refill(sc, q); mvneta_rx_unlockq(sc, q); } if (!sc->phy_attached) mvneta_linkup(sc); /* Enable interrupt */ mvneta_enable_intr(sc); /* Set Counter */ callout_schedule(&sc->tick_ch, hz); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); } STATIC void mvneta_init(void *arg) { struct mvneta_softc *sc; sc = arg; mvneta_sc_lock(sc); mvneta_init_locked(sc); if (sc->phy_attached) mii_mediachg(sc->mii); mvneta_sc_unlock(sc); } /* ARGSUSED */ STATIC void mvneta_stop_locked(struct mvneta_softc *sc) { if_t ifp; uint32_t reg; int q; ifp = sc->ifp; if (ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; mvneta_disable_intr(sc); callout_stop(&sc->tick_ch); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); /* Link down */ if (sc->linkup == TRUE) mvneta_linkdown(sc); /* Reset the MAC Port Enable bit */ reg = MVNETA_READ(sc, MVNETA_PMACC0); reg &= ~MVNETA_PMACC0_PORTEN; MVNETA_WRITE(sc, MVNETA_PMACC0, reg); /* Disable each of queue */ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { mvneta_rx_lockq(sc, q); mvneta_ring_flush_rx_queue(sc, q); mvneta_rx_unlockq(sc, q); } /* * Hold Reset state of DMA Engine * (must write 0x0 to restart it) */ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001); MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001); for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { mvneta_tx_lockq(sc, q); mvneta_ring_flush_tx_queue(sc, q); mvneta_tx_unlockq(sc, q); } } STATIC void mvneta_stop(struct mvneta_softc *sc) { mvneta_sc_lock(sc); mvneta_stop_locked(sc); mvneta_sc_unlock(sc); } STATIC int mvneta_mediachange(if_t ifp) { struct 
mvneta_softc *sc; sc = if_getsoftc(ifp); if (!sc->phy_attached && !sc->use_inband_status) { /* We shouldn't be here */ if_printf(ifp, "Cannot change media in fixed-link mode!\n"); return (0); } if (sc->use_inband_status) { mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media); return (0); } mvneta_sc_lock(sc); /* Update PHY */ mii_mediachg(sc->mii); mvneta_sc_unlock(sc); return (0); } STATIC void mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr) { uint32_t psr; psr = MVNETA_READ(sc, MVNETA_PSR); /* Speed */ if (psr & MVNETA_PSR_GMIISPEED) ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T); else if (psr & MVNETA_PSR_MIISPEED) ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX); else if (psr & MVNETA_PSR_LINKUP) ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T); /* Duplex */ if (psr & MVNETA_PSR_FULLDX) ifmr->ifm_active |= IFM_FDX; /* Link */ ifmr->ifm_status = IFM_AVALID; if (psr & MVNETA_PSR_LINKUP) ifmr->ifm_status |= IFM_ACTIVE; } STATIC void mvneta_mediastatus(if_t ifp, struct ifmediareq *ifmr) { struct mvneta_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); if (!sc->phy_attached && !sc->use_inband_status) { ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; return; } mvneta_sc_lock(sc); if (sc->use_inband_status) { mvneta_get_media(sc, ifmr); mvneta_sc_unlock(sc); return; } mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; mvneta_sc_unlock(sc); } /* * Link State Notify */ STATIC void mvneta_update_autoneg(struct mvneta_softc *sc, int enable) { int reg; if (enable) { reg = MVNETA_READ(sc, MVNETA_PANC); reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | MVNETA_PANC_ANFCEN); reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_INBANDANEN; MVNETA_WRITE(sc, MVNETA_PANC, reg); reg = MVNETA_READ(sc, MVNETA_PMACC2); reg |= MVNETA_PMACC2_INBANDANMODE; MVNETA_WRITE(sc, MVNETA_PMACC2, reg); reg = MVNETA_READ(sc, MVNETA_PSOMSCD); reg |= 
MVNETA_PSOMSCD_ENABLE; MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); } else { reg = MVNETA_READ(sc, MVNETA_PANC); reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_INBANDANEN); MVNETA_WRITE(sc, MVNETA_PANC, reg); reg = MVNETA_READ(sc, MVNETA_PMACC2); reg &= ~MVNETA_PMACC2_INBANDANMODE; MVNETA_WRITE(sc, MVNETA_PMACC2, reg); reg = MVNETA_READ(sc, MVNETA_PSOMSCD); reg &= ~MVNETA_PSOMSCD_ENABLE; MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); } } STATIC int mvneta_update_media(struct mvneta_softc *sc, int media) { int reg, err; boolean_t running; err = 0; mvneta_sc_lock(sc); mvneta_linkreset(sc); running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0; if (running) mvneta_stop_locked(sc); sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO); if (!sc->phy_attached || sc->use_inband_status) mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO); mvneta_update_eee(sc); mvneta_update_fc(sc); if (IFM_SUBTYPE(media) != IFM_AUTO) { reg = MVNETA_READ(sc, MVNETA_PANC); reg &= ~(MVNETA_PANC_SETGMIISPEED | MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETFULLDX); if (IFM_SUBTYPE(media) == IFM_1000_T || IFM_SUBTYPE(media) == IFM_2500_T) { if ((media & IFM_FDX) == 0) { device_printf(sc->dev, "%s half-duplex unsupported\n", IFM_SUBTYPE(media) == IFM_1000_T ? 
"1000Base-T" : "2500Base-T"); err = EINVAL; goto out; } reg |= MVNETA_PANC_SETGMIISPEED; } else if (IFM_SUBTYPE(media) == IFM_100_TX) reg |= MVNETA_PANC_SETMIISPEED; if (media & IFM_FDX) reg |= MVNETA_PANC_SETFULLDX; MVNETA_WRITE(sc, MVNETA_PANC, reg); } out: if (running) mvneta_init_locked(sc); mvneta_sc_unlock(sc); return (err); } STATIC void mvneta_adjust_link(struct mvneta_softc *sc) { boolean_t phy_linkup; int reg; /* Update eee/fc */ mvneta_update_eee(sc); mvneta_update_fc(sc); /* Check for link change */ phy_linkup = (sc->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE); if (sc->linkup != phy_linkup) mvneta_linkupdate(sc, phy_linkup); /* Don't update media on disabled link */ if (!phy_linkup) return; /* Check for media type change */ if (sc->mvneta_media != sc->mii->mii_media_active) { sc->mvneta_media = sc->mii->mii_media_active; reg = MVNETA_READ(sc, MVNETA_PANC); reg &= ~(MVNETA_PANC_SETGMIISPEED | MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETFULLDX); if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T || IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) { reg |= MVNETA_PANC_SETGMIISPEED; } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX) reg |= MVNETA_PANC_SETMIISPEED; if (sc->mvneta_media & IFM_FDX) reg |= MVNETA_PANC_SETFULLDX; MVNETA_WRITE(sc, MVNETA_PANC, reg); } } STATIC void mvneta_link_isr(struct mvneta_softc *sc) { int linkup; KASSERT_SC_MTX(sc); linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE; if (sc->linkup == linkup) return; if (linkup == TRUE) mvneta_linkup(sc); else mvneta_linkdown(sc); #ifdef DEBUG device_printf(sc->dev, "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down"); #endif } STATIC void mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup) { KASSERT_SC_MTX(sc); if (linkup == TRUE) mvneta_linkup(sc); else mvneta_linkdown(sc); #ifdef DEBUG device_printf(sc->dev, "%s: link %s\n", if_name(sc->ifp), linkup ? 
"up" : "down"); #endif } STATIC void mvneta_update_eee(struct mvneta_softc *sc) { uint32_t reg; KASSERT_SC_MTX(sc); /* set EEE parameters */ reg = MVNETA_READ(sc, MVNETA_LPIC1); if (sc->cf_lpi) reg |= MVNETA_LPIC1_LPIRE; else reg &= ~MVNETA_LPIC1_LPIRE; MVNETA_WRITE(sc, MVNETA_LPIC1, reg); } STATIC void mvneta_update_fc(struct mvneta_softc *sc) { uint32_t reg; KASSERT_SC_MTX(sc); reg = MVNETA_READ(sc, MVNETA_PANC); if (sc->cf_fc) { /* Flow control negotiation */ reg |= MVNETA_PANC_PAUSEADV; reg |= MVNETA_PANC_ANFCEN; } else { /* Disable flow control negotiation */ reg &= ~MVNETA_PANC_PAUSEADV; reg &= ~MVNETA_PANC_ANFCEN; } MVNETA_WRITE(sc, MVNETA_PANC, reg); } STATIC void mvneta_linkup(struct mvneta_softc *sc) { uint32_t reg; KASSERT_SC_MTX(sc); if (!sc->phy_attached || !sc->use_inband_status) { reg = MVNETA_READ(sc, MVNETA_PANC); reg |= MVNETA_PANC_FORCELINKPASS; reg &= ~MVNETA_PANC_FORCELINKFAIL; MVNETA_WRITE(sc, MVNETA_PANC, reg); } mvneta_qflush(sc->ifp); mvneta_portup(sc); sc->linkup = TRUE; if_link_state_change(sc->ifp, LINK_STATE_UP); } STATIC void mvneta_linkdown(struct mvneta_softc *sc) { uint32_t reg; KASSERT_SC_MTX(sc); if (!sc->phy_attached || !sc->use_inband_status) { reg = MVNETA_READ(sc, MVNETA_PANC); reg &= ~MVNETA_PANC_FORCELINKPASS; reg |= MVNETA_PANC_FORCELINKFAIL; MVNETA_WRITE(sc, MVNETA_PANC, reg); } mvneta_portdown(sc); mvneta_qflush(sc->ifp); sc->linkup = FALSE; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } STATIC void mvneta_linkreset(struct mvneta_softc *sc) { struct mii_softc *mii; if (sc->phy_attached) { /* Force reset PHY */ mii = LIST_FIRST(&sc->mii->mii_phys); if (mii) mii_phy_reset(mii); } } /* * Tx Subroutines */ STATIC int mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q) { if_t ifp; bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT]; struct mbuf *mtmp, *mbuf; struct mvneta_tx_ring *tx; struct mvneta_buf *txbuf; struct mvneta_tx_desc *t; uint32_t ptxsu; int used, error, i, txnsegs; mbuf = *mbufp; tx = 
MVNETA_TX_RING(sc, q);
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
	t = NULL;
	ifp = sc->ifp;

	/*
	 * The controller cannot insert a VLAN tag in hardware: encapsulate
	 * the tag into the frame body here and clear the mbuf flag.
	 */
	if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
		mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
		if (mbuf == NULL) {
			tx->drv_error++;
			*mbufp = NULL;
			return (ENOBUFS);
		}
		mbuf->m_flags &= ~M_VLANTAG;
		*mbufp = mbuf;
	}

	/*
	 * Checksum offload on a chained, read-only mbuf needs a writable
	 * copy (the csum code below patches the packet headers in place).
	 */
	if (__predict_false(mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL) {
				tx->drv_error++;
				*mbufp = NULL;
				return (ENOBUFS);
			}
			*mbufp = mbuf = mtmp;
		}
	}

	/* load mbuf using dmamap of 1st descriptor */
	txbuf = &tx->txbuf[tx->cpu];
	error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
	    txbuf->dmap, mbuf, txsegs, &txnsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d",
		    if_name(ifp), q, error);
#endif
		/* This is the only recoverable error (except EFBIG). */
		if (error != ENOMEM) {
			tx->drv_error++;
			m_freem(mbuf);
			*mbufp = NULL;
			return (ENOBUFS);
		}
		return (error);
	}

	/* Reject if the ring lacks room for all segments of this mbuf. */
	if (__predict_false(txnsegs <= 0 ||
	    (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
		/* not enough descriptors, or mbuf is broken */
#ifdef MVNETA_KTR
		CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
		    if_name(ifp), q, txnsegs);
#endif
		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
		return (ENOBUFS);
	}
	DASSERT(txbuf->m == NULL);

	/* remember mbuf using 1st descriptor */
	txbuf->m = mbuf;
	bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* load to tx descriptors; F flag and csum bits go on the first one */
	used = 0;
	for (i = 0; i < txnsegs; i++) {
		t = &tx->desc[tx->cpu];
		t->command = 0;
		t->l4ichk = 0;
		t->flags = 0;
		if (__predict_true(i == 0)) {
			/* 1st descriptor */
			t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
			t->command |= MVNETA_TX_CMD_F;
			mvneta_tx_set_csumflag(ifp, t, mbuf);
		}
		t->bufptr_pa = txsegs[i].ds_addr;
		t->bytecnt = txsegs[i].ds_len;
		tx->cpu = tx_counter_adv(tx->cpu, 1);
		tx->used++;
		used++;
	}
	/* t is last descriptor here */
	DASSERT(t != NULL);
	t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* PTXSU NOWD field holds at most 255; split larger updates. */
	while (__predict_false(used > 255)) {
		ptxsu = MVNETA_PTXSU_NOWD(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		used -= 255;
	}
	if (__predict_true(used > 0)) {
		ptxsu = MVNETA_PTXSU_NOWD(used);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
	return (0);
}

/*
 * Fill in the L3/L4 checksum-offload command bits of the first TX
 * descriptor 't' for mbuf 'm', based on the interface's enabled hwassist
 * flags.  Non-IP (and non-VLAN-encapsulated-IP) frames get
 * L4_CHECKSUM_NONE and no L3 bits.
 */
STATIC void
mvneta_tx_set_csumflag(if_t ifp,
    struct mvneta_tx_desc *t, struct mbuf *m)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int csum_flags;
	uint32_t iphl, ipoff;
	struct ip *ip;

	iphl = ipoff = 0;
	/* only offload what the interface is configured for */
	csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
	eh = mtod(m, struct ether_header *);
	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		ipoff = ETHER_HDR_LEN;
		break;
	case ETHERTYPE_VLAN:
		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		evh = mtod(m, struct ether_vlan_header *);
		/* handle QinQ: skip a second VLAN tag if present */
		if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
			ipoff += ETHER_VLAN_ENCAP_LEN;
		break;
	default:
		csum_flags = 0;
	}

	if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
		ip = (struct ip *)(m->m_data + ipoff);
		iphl = ip->ip_hl<<2;
		t->command |= MVNETA_TX_CMD_L3_IP4;
	} else {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
		return;
	}

	/* L3 */
	if (csum_flags & CSUM_IP) {
		t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
	}

	/* L4 */
	if (csum_flags & CSUM_IP_TCP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_TCP;
	} else if (csum_flags & CSUM_IP_UDP) {
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
		t->command |= MVNETA_TX_CMD_L4_UDP;
	} else
		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;

	t->l4ichk = 0;
	t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
	t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
}

/*
 * Reap completed TX descriptors on queue 'q': free their mbufs, advance
 * the ring 'dma' index and acknowledge the hardware via PTXSU.  Also
 * detects a hung queue via the watchdog timestamp.  Caller holds the TX
 * queue lock.
 */
STATIC void
mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
{
	struct mvneta_tx_ring *tx;
	struct mvneta_buf *txbuf;
	struct mvneta_tx_desc *t __diagused;
	uint32_t ptxs, ptxsu, ndesc;
	int i;

	KASSERT_TX_MTX(sc, q);

	tx = MVNETA_TX_RING(sc, q);
	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
	ndesc = MVNETA_PTXS_GET_TBC(ptxs);

	if (__predict_false(ndesc == 0)) {
		/* nothing completed; idle the queue or flag it as hung */
		if (tx->used == 0)
			tx->queue_status = MVNETA_QUEUE_IDLE;
		else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
		    ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
			tx->queue_hung = TRUE;
		return;
	}

#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
	    if_name(sc->ifp), q, ndesc);
#endif

	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < ndesc; i++) {
		t = &tx->desc[tx->dma];
#ifdef MVNETA_KTR
		if (t->flags & MVNETA_TX_F_ES)
			CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
			    if_name(sc->ifp), q, tx->dma);
#endif
		txbuf = &tx->txbuf[tx->dma];
		if (__predict_true(txbuf->m != NULL)) {
			DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
			bus_dmamap_unload(sc->txmbuf_dtag,
txbuf->dmap);
			m_freem(txbuf->m);
			txbuf->m = NULL;
		}
		else
			DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
		tx->dma = tx_counter_adv(tx->dma, 1);
		tx->used--;
	}
	DASSERT(tx->used >= 0);
	DASSERT(tx->used <= MVNETA_TX_RING_CNT);

	/* PTXSU NORB field holds at most 255; split larger acks. */
	while (__predict_false(ndesc > 255)) {
		ptxsu = MVNETA_PTXSU_NORB(255);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
		ndesc -= 255;
	}
	if (__predict_true(ndesc > 0)) {
		ptxsu = MVNETA_PTXSU_NORB(ndesc);
		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
	}
#ifdef MVNETA_KTR
	CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
	    if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
#endif

	tx->watchdog_time = ticks;

	if (tx->used == 0)
		tx->queue_status = MVNETA_QUEUE_IDLE;
}

/*
 * Do a final TX complete when TX is idle.
 */
STATIC void
mvneta_tx_drain(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int q;

	/*
	 * Handle trailing mbuf on TX queue.
	 * Check is done lockless to avoid TX path contention.
	 */
	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
		tx = MVNETA_TX_RING(sc, q);
		if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
		    tx->used > 0) {
			mvneta_tx_lockq(sc, q);
			mvneta_tx_queue_complete(sc, q);
			mvneta_tx_unlockq(sc, q);
		}
	}
}

/*
 * Rx Subroutines
 */

/*
 * Service up to 'count' received frames on RX queue 'q' (count <= 0 means
 * all outstanding).  Returns non-zero when frames remain, so the caller
 * can reschedule.
 */
STATIC int
mvneta_rx(struct mvneta_softc *sc, int q, int count)
{
	uint32_t prxs, npkt;
	int more;

	more = 0;
	mvneta_rx_lockq(sc, q);
	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
	/* ODC = number of occupied (received) descriptors */
	npkt = MVNETA_PRXS_GET_ODC(prxs);
	if (__predict_false(npkt == 0))
		goto out;

	if (count > 0 && npkt > count) {
		more = 1;
		npkt = count;
	}
	mvneta_rx_queue(sc, q, npkt);
out:
	mvneta_rx_unlockq(sc, q);
	return more;
}

/*
 * Helper routine for updating PRXSU register of a given queue.
 * Handles number of processed descriptors bigger than maximum acceptable value.
*/
STATIC __inline void
mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
{
	uint32_t prxsu;

	/* The NOOFPROCESSEDDESCRIPTORS field holds at most 255 per write. */
	while (__predict_false(processed > 255)) {
		prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
		processed -= 255;
	}
	prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
	MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
}

/* Thin wrapper over the compiler prefetch builtin. */
static __inline void
mvneta_prefetch(void *p)
{

	__builtin_prefetch(p);
}

/*
 * Receive 'npkt' frames from RX queue 'q': hand each mbuf to the stack
 * (or to LRO when eligible), recycle error descriptors, and periodically
 * acknowledge/refill the ring to avoid stalls.  Caller holds the RX
 * queue lock; it is dropped around if_input().
 */
STATIC void
mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
{
	if_t ifp;
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	struct mbuf *m;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	void *pktbuf;
	int i, pktlen, processed, ndma;

	KASSERT_RX_MTX(sc, q);
	ifp = sc->ifp;
	rx = MVNETA_RX_RING(sc, q);
	processed = 0;

	if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
		return;

	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < npkt; i++) {
		/* Prefetch next desc, rxbuf. */
		ndma = rx_counter_adv(rx->dma, 1);
		mvneta_prefetch(&rx->desc[ndma]);
		mvneta_prefetch(&rx->rxbuf[ndma]);

		/* get descriptor and packet */
		r = &rx->desc[rx->dma];
		rxbuf = &rx->rxbuf[rx->dma];
		m = rxbuf->m;
		rxbuf->m = NULL;
		DASSERT(m != NULL);
		bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
		/* Prefetch mbuf header. */
		mvneta_prefetch(m);

		processed++;
		/* Drop desc with error status or not in a single buffer. */
		DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
		    (MVNETA_RX_F|MVNETA_RX_L));
		if (__predict_false((r->status & MVNETA_RX_ES) ||
		    (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
		    (MVNETA_RX_F|MVNETA_RX_L)))
			goto rx_error;

		/*
		 * [ OFF | MH | PKT | CRC ]
		 * bytecnt cover MH, PKT, CRC
		 */
		pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
		pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
	    MVNETA_HWHEADER_SIZE;

		/* Prefetch mbuf data. */
		mvneta_prefetch(pktbuf);

		/* Write value to mbuf (avoid read). */
		m->m_data = pktbuf;
		m->m_len = m->m_pkthdr.len = pktlen;
		m->m_pkthdr.rcvif = ifp;
		mvneta_rx_set_csumflag(ifp, r, m);

		/* Increase rx_dma before releasing the lock. */
		rx->dma = ndma;

		/* LRO only for TCP/IPv4 frames with a valid HW checksum. */
		if (__predict_false(rx->lro_enabled &&
		    ((r->status & MVNETA_RX_L3_IP) != 0) &&
		    ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
		    (m->m_pkthdr.csum_flags &
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
			if (rx->lro.lro_cnt != 0) {
				if (tcp_lro_rx(&rx->lro, m, 0) == 0)
					goto rx_done;
			}
		}

		mvneta_rx_unlockq(sc, q);
		if_input(ifp, m);
		mvneta_rx_lockq(sc, q);
		/*
		 * Check whether this queue has been disabled in the
		 * meantime. If yes, then clear LRO and exit.
		 */
		if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
			goto rx_lro;
rx_done:
		/* Refresh receive ring to avoid stall and minimize jitter. */
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
		continue;
rx_error:
		m_freem(m);
		rx->dma = ndma;
		/* Refresh receive ring to avoid stall and minimize jitter.
*/
		if (processed >= MVNETA_RX_REFILL_COUNT) {
			mvneta_prxsu_update(sc, q, processed);
			mvneta_rx_queue_refill(sc, q);
			processed = 0;
		}
	}
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
#endif
	/* DMA status update */
	mvneta_prxsu_update(sc, q, processed);
	/* Refill the rest of buffers if there are any to refill */
	mvneta_rx_queue_refill(sc, q);

rx_lro:
	/*
	 * Flush any outstanding LRO work
	 */
	lro = &rx->lro;
	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
		LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
		tcp_lro_flush(lro, queued);
	}
}

/* Release the DMA map and mbuf of a single RX buffer. */
STATIC void
mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
{

	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
	/* This will remove all data at once */
	m_freem(rxbuf->m);
}

/*
 * Replenish empty slots of RX queue 'q' with freshly allocated jumbo
 * cluster mbufs, then tell the hardware how many new descriptors are
 * available via PRXSU.  On total allocation failure with an empty ring,
 * sets needs_refill so a later pass retries.  Caller holds the RX queue
 * lock.
 */
STATIC void
mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
{
	struct mvneta_rx_ring *rx;
	struct mvneta_rx_desc *r;
	struct mvneta_buf *rxbuf;
	bus_dma_segment_t segs;
	struct mbuf *m;
	uint32_t prxs, prxsu, ndesc;
	int npkt, refill, nsegs, error;

	KASSERT_RX_MTX(sc, q);
	rx = MVNETA_RX_RING(sc, q);
	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
	/* slots already owned by HW = non-occupied + occupied descriptors */
	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
	refill = MVNETA_RX_RING_CNT - ndesc;
#ifdef MVNETA_KTR
	CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
	    refill);
#endif
	if (__predict_false(refill <= 0))
		return;

	for (npkt = 0; npkt < refill; npkt++) {
		rxbuf = &rx->rxbuf[rx->cpu];
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
		if (__predict_false(m == NULL)) {
			error = ENOBUFS;
			break;
		}
		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag,
		    rxbuf->dmap, m, &segs, &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0 || nsegs != 1)) {
			/*
			 * NOTE(review): KASSERT(1, ...) is a no-op — the
			 * assertion only fires when its condition is false,
			 * so this can never report the failure.  Looks like
			 * the condition is inverted; confirm whether
			 * KASSERT(0, ...) (panic under INVARIANTS) or a
			 * plain diagnostic printf was intended — the break
			 * below already handles the failure at runtime.
			 */
			KASSERT(1, ("Failed to load Rx mbuf DMA map"));
			m_freem(m);
			break;
		}

		/* Add the packet to the ring */
		rxbuf->m = m;
		r = &rx->desc[rx->cpu];
		r->bufptr_pa = segs.ds_addr;
		rx->rxbuf_virt_addr[rx->cpu] = m->m_data;

		rx->cpu = rx_counter_adv(rx->cpu, 1);
	}
	if (npkt == 0) {
		if (refill == MVNETA_RX_RING_CNT)
			rx->needs_refill = TRUE;
		return;
	}

	rx->needs_refill = FALSE;
	bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* NOOFNEWDESCRIPTORS holds at most 255; split larger updates. */
	while (__predict_false(npkt > 255)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
		npkt -= 255;
	}
	if (__predict_true(npkt > 0)) {
		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
	}
}

/*
 * Translate the RX descriptor's hardware checksum status bits into mbuf
 * csum_flags for IPv4 (and TCP/UDP over IPv4) frames.
 */
STATIC __inline void
mvneta_rx_set_csumflag(if_t ifp,
    struct mvneta_rx_desc *r, struct mbuf *m)
{
	uint32_t csum_flags;

	csum_flags = 0;
	if (__predict_false((r->status &
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
		return; /* not a IP packet */

	/* L3 */
	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
	    MVNETA_RX_IP_HEADER_OK))
		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;

	if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
		/* L4 */
		switch (r->status & MVNETA_RX_L4_MASK) {
		case MVNETA_RX_L4_TCP:
		case MVNETA_RX_L4_UDP:
			csum_flags |= CSUM_L4_CALC;
			if (__predict_true((r->status &
			    MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
				csum_flags |= CSUM_L4_VALID;
				m->m_pkthdr.csum_data = htons(0xffff);
			}
			break;
		case MVNETA_RX_L4_OTH:
		default:
			break;
		}
	}
	m->m_pkthdr.csum_flags = csum_flags;
}

/*
 * MAC address filter
 */

/*
 * Program the port configuration (PXC) and the unicast/multicast
 * destination-address filter tables according to the interface flags
 * (PROMISC/ALLMULTI/BROADCAST).  Caller holds the softc lock.
 */
STATIC void
mvneta_filter_setup(struct mvneta_softc *sc)
{
	if_t ifp;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;

	KASSERT_SC_MTX(sc);

	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	ifp = sc->ifp;
	/* driver always runs with ALLMULTI set */
	if_setflagbits(ifp, IFF_ALLMULTI, 0);
	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		/* pass all multicast: every table entry passes to queue 0 */
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS)
| MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	}

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
	/* steer all classified traffic to the last RX queue */
	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
	/* RB* bits reject broadcast; cleared below when IFF_BROADCAST */
	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
	if (if_getflags(ifp) & IFF_BROADCAST) {
		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
	}
	if (if_getflags(ifp) & IFF_PROMISC) {
		pxc |= MVNETA_PXC_UPM;
	}
	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	if (if_getflags(ifp) & IFF_PROMISC) {
		/* pass all unicast addresses */
		for (i = 0; i < MVNETA_NDFUT; i++) {
			dfut[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		/* pass only our own address, hashed by its last nibble */
		i = sc->enaddr[5] & 0xf; /* last nibble */
		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	}
	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
}

/*
 * sysctl(9)
 */

/*
 * Sysctl handler: report the cached (software-accumulated) value of one
 * MIB counter.  arg1 is a struct mvneta_sysctl_mib.
 */
STATIC int
sysctl_read_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_sysctl_mib *arg;
	struct mvneta_softc *sc;
	uint64_t val;

	arg = (struct mvneta_sysctl_mib *)arg1;
	if (arg == NULL)
		return (EINVAL);

	sc = arg->sc;
	if (sc == NULL)
		return (EINVAL);
	/*
	 * NOTE(review): this bound admits index == MVNETA_PORTMIB_NOCOUNTER.
	 * Harmless here since only arg->counter is read (no array access by
	 * index), but '>=' would be the tighter check — confirm intent.
	 */
	if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
		return (EINVAL);

	mvneta_sc_lock(sc);
	val = arg->counter;
	mvneta_sc_unlock(sc);
	return sysctl_handle_64(oidp, &val, 0, req);
}

/*
 * Sysctl handler: writing 1 resets all MIB counters.  arg1 is the softc.
 */
STATIC int
sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_softc *sc;
	int err, val;

	val = 0;
	sc = (struct mvneta_softc *)arg1;
	if (sc == NULL)
		return (EINVAL);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0)
		return (err);

	if (val < 0 || val > 1)
		return (EINVAL);

	if (val == 1) {
		mvneta_sc_lock(sc);
		mvneta_clear_mib(sc);
		mvneta_sc_unlock(sc);
	}

	return (0);
}

/*
 * Sysctl handler: read/set the RX interrupt-coalescing threshold timer of
 * one queue, in microseconds.  The value is converted to/from mv-clock
 * ticks via sc->clk_freq and written to PRXITTH.  arg1 is a
 * struct mvneta_sysctl_queue.
 */
STATIC int
sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
{
	struct mvneta_sysctl_queue *arg;
	struct mvneta_rx_ring *rx;
	struct mvneta_softc *sc;
	uint32_t reg, time_mvtclk;
	int err, time_us;

	rx = NULL;
	arg = (struct mvneta_sysctl_queue *)arg1;
	if (arg == NULL)
		return (EINVAL);
	/*
	 * NOTE(review): bound uses MVNETA_RX_RING_CNT (descriptors per
	 * ring); queue loops elsewhere in this file use MVNETA_RX_QNUM_MAX.
	 * Confirm which limit is intended for a queue index here.
	 */
	if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
		return (EINVAL);
	if (arg->rxtx != MVNETA_SYSCTL_RX)
		return (EINVAL);

	sc = arg->sc;
	if (sc == NULL)
		return (EINVAL);

	/* read queue length */
	mvneta_sc_lock(sc);
	mvneta_rx_lockq(sc, arg->queue);
	rx = MVNETA_RX_RING(sc, arg->queue);
	time_mvtclk = rx->queue_th_time;
	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
	mvneta_rx_unlockq(sc, arg->queue);
	mvneta_sc_unlock(sc);

	err = sysctl_handle_int(oidp, &time_us, 0, req);
	if (err != 0)
		return (err);

	mvneta_sc_lock(sc);
	mvneta_rx_lockq(sc, arg->queue);

	/* update queue length (0[sec] - 1[sec]) */
	if (time_us < 0 || time_us > (1000 * 1000)) {
		mvneta_rx_unlockq(sc, arg->queue);
		mvneta_sc_unlock(sc);
		return (EINVAL);
	}
	time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
	rx->queue_th_time = time_mvtclk;
	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
	MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
	mvneta_rx_unlockq(sc, arg->queue);
	mvneta_sc_unlock(sc);
	return (0);
}

/*
 * Build the device's sysctl tree: per-port knobs (flow_control, lpi),
 * MIB counters under "mib", and per-RX-queue coalescing controls under
 * "rx.queueN".
 */
STATIC void
sysctl_mvneta_init(struct mvneta_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid_list *rxchildren;
	struct sysctl_oid_list *qchildren, *mchildren;
	struct sysctl_oid *tree;
	int i, q;
	struct mvneta_sysctl_queue *rxarg;
#define	MVNETA_SYSCTL_NAME(num) "queue" # num
	static const char *sysctl_queue_names[] = {
		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
	};
#undef MVNETA_SYSCTL_NAME

#ifndef NO_SYSCTL_DESCR
#define	MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
	static const char *sysctl_queue_descrs[] = {
		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
	};
#undef MVNETA_SYSCTL_DESCR
#endif

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
	rxchildren = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
	mchildren = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");

	/*
	 * MIB access
	 */
	/* dev.mvneta.[unit].mib.<mibs> */
	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];

		mib_arg->sc = sc;
		mib_arg->index = i;
		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
		    mvneta_mib_list[i].sysctl_name,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    (void *)mib_arg, 0, sysctl_read_mib, "I",
		    mvneta_mib_list[i].desc);
	}
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");

	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");

	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
		rxarg = &sc->sysctl_rx_queue[q];

		rxarg->sc = sc;
		rxarg->queue = q;
		rxarg->rxtx = MVNETA_SYSCTL_RX;

		/* hw.mvneta.mvneta[unit].rx.[queue] */
		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
		    sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
		    sysctl_queue_descrs[q]);
		qchildren = SYSCTL_CHILDREN(tree);

		/* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    rxarg, 0, sysctl_set_queue_rxthtime, "I",
		    "interrupt coalescing threshold timer [us]");
	}
}

/*
 * MIB
 */

/*
 * Read one hardware MIB counter by table index; 64-bit counters are
 * assembled from two consecutive 32-bit registers.
 */
STATIC uint64_t
mvneta_read_mib(struct mvneta_softc *sc, int index)
{
	struct mvneta_mib_def *mib;
	uint64_t val;

	mib = &mvneta_mib_list[index];
	val = MVNETA_READ_MIB(sc, mib->regnum);
	if (mib->reg64)
		val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
	return (val);
}

/*
 * Zero all software MIB accumulators; reading the hardware registers
 * (values discarded) clears the read-to-clear counters too.  Caller
 * holds the softc lock.
 */
STATIC void
mvneta_clear_mib(struct mvneta_softc *sc)
{
	int i;

	KASSERT_SC_MTX(sc);
	for (i = 0; i < nitems(mvneta_mib_list); i++) {
		(void)mvneta_read_mib(sc, i);
		sc->sysctl_mib[i].counter = 0;
	}
	MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc = 0;
MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc = 0;
	sc->counter_watchdog = 0;
}

/*
 * Periodically fold the hardware MIB counters (read-to-clear) into the
 * sysctl accumulators and the generic if_inc_counter() statistics, and
 * collect TX driver errors from the per-queue rings.
 */
STATIC void
mvneta_update_mib(struct mvneta_softc *sc)
{
	struct mvneta_tx_ring *tx;
	int i;
	uint64_t val;
	uint32_t reg;

	for (i = 0; i < nitems(mvneta_mib_list); i++) {

		val = mvneta_read_mib(sc, i);
		if (val == 0)
			continue;

		sc->sysctl_mib[i].counter += val;
		switch (mvneta_mib_list[i].regnum) {
			case MVNETA_MIB_RX_GOOD_OCT:
				if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
				break;
			case MVNETA_MIB_RX_BAD_FRAME:
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
				break;
			case MVNETA_MIB_RX_GOOD_FRAME:
				if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
				break;
			case MVNETA_MIB_RX_MCAST_FRAME:
				if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
				break;
			case MVNETA_MIB_TX_GOOD_OCT:
				if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
				break;
			case MVNETA_MIB_TX_GOOD_FRAME:
				if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
				break;
			case MVNETA_MIB_TX_MCAST_FRAME:
				if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
				break;
			case MVNETA_MIB_MAC_COL:
				if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
				break;
			case MVNETA_MIB_TX_MAC_TRNS_ERR:
			case MVNETA_MIB_TX_EXCES_COL:
			case MVNETA_MIB_MAC_LATE_COL:
				if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
				break;
		}
	}

	reg = MVNETA_READ(sc, MVNETA_PDFC);
	sc->counter_pdfc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
	reg = MVNETA_READ(sc, MVNETA_POFC);
	sc->counter_pofc += reg;
	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);

	/* TX watchdog. */
	if (sc->counter_watchdog_mib > 0) {
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
		sc->counter_watchdog_mib = 0;
	}
	/*
	 * TX driver errors:
	 * We do not take queue locks to not disrupt TX path.
	 * We may only miss one drv error which will be fixed at
	 * next mib update. We may also clear counter when TX path
	 * is incrementing it but we only do it if counter was not zero
	 * thus we may only lose one error.
	 */
	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
		tx = MVNETA_TX_RING(sc, i);

		if (tx->drv_error > 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS,
			    tx->drv_error);
			tx->drv_error = 0;
		}
	}
}
diff --git a/sys/dev/ofw/ofw_cpu.c b/sys/dev/ofw/ofw_cpu.c
index bd9164fd9f5c..e18004ae19d2 100644
--- a/sys/dev/ofw/ofw_cpu.c
+++ b/sys/dev/ofw/ofw_cpu.c
@@ -1,401 +1,401 @@
/*-
 * Copyright (C) 2009 Nathan Whitehorn
 * Copyright (C) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#if defined(__arm__) || defined(__arm64__) || defined(__riscv__)
-#include 
+#include 
#endif

static int	ofw_cpulist_probe(device_t);
static int	ofw_cpulist_attach(device_t);
static const struct ofw_bus_devinfo *ofw_cpulist_get_devinfo(device_t dev,
    device_t child);

static MALLOC_DEFINE(M_OFWCPU, "ofwcpu", "OFW CPU device information");

/* Per-"cpus" node state: number of #address-cells for child reg props. */
struct ofw_cpulist_softc {
	pcell_t	 sc_addr_cells;
};

static device_method_t ofw_cpulist_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ofw_cpulist_probe),
	DEVMETHOD(device_attach,	ofw_cpulist_attach),
	/* Bus interface */
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_child_pnpinfo,	ofw_bus_gen_child_pnpinfo),
	DEVMETHOD(bus_get_device_path,	ofw_bus_gen_get_device_path),
	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo,	ofw_cpulist_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat,	ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,	ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,	ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,	ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,	ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static driver_t ofw_cpulist_driver = {
	"cpulist",
	ofw_cpulist_methods,
	sizeof(struct ofw_cpulist_softc)
};

DRIVER_MODULE(ofw_cpulist, ofwbus, ofw_cpulist_driver, 0, 0);

/* Match only the OFW node literally named "cpus". */
static int
ofw_cpulist_probe(device_t dev)
{
	const char	*name;

	name = ofw_bus_get_name(dev);

	if (name == NULL || strcmp(name, "cpus") != 0)
		return (ENXIO);

	device_set_desc(dev, "Open Firmware CPU Group");

	return (0);
}

/*
 * Enumerate children of the "cpus" node, creating one child device per
 * OFW child with its bus devinfo stored in the instance variables.
 */
static int
ofw_cpulist_attach(device_t dev)
{
	struct ofw_cpulist_softc *sc;
	phandle_t root, child;
	device_t cdev;
	struct ofw_bus_devinfo *dinfo;

	sc = device_get_softc(dev);
	root = ofw_bus_get_node(dev);

	/* default per ePAPR when #address-cells is absent */
	sc->sc_addr_cells = 1;
	OF_getencprop(root, "#address-cells", &sc->sc_addr_cells,
	    sizeof(sc->sc_addr_cells));

	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		dinfo = malloc(sizeof(*dinfo), M_OFWCPU, M_WAITOK | M_ZERO);

		if (ofw_bus_gen_setup_devinfo(dinfo, child) != 0) {
			free(dinfo, M_OFWCPU);
			continue;
		}
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    dinfo->obd_name);
			ofw_bus_gen_destroy_devinfo(dinfo);
			free(dinfo, M_OFWCPU);
			continue;
		}
		device_set_ivars(cdev, dinfo);
	}

	return (bus_generic_attach(dev));
}

static const struct ofw_bus_devinfo *
ofw_cpulist_get_devinfo(device_t dev, device_t child)
{
	return (device_get_ivars(child));
}

static int	ofw_cpu_probe(device_t);
static int	ofw_cpu_attach(device_t);
static int	ofw_cpu_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);

/* Per-CPU-node state exported through the cpu ivars below. */
struct ofw_cpu_softc {
	struct pcpu	*sc_cpu_pcpu;	/* matching machine pcpu, if found */
	uint32_t	 sc_nominal_mhz;	/* 0 when frequency is unknown */
	boolean_t	 sc_reg_valid;	/* "reg" property read and sane */
	pcell_t		 sc_reg[2];	/* raw "reg" cells (CPU id) */
};

static device_method_t ofw_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ofw_cpu_probe),
	DEVMETHOD(device_attach,	ofw_cpu_attach),

	/* Bus interface */
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_read_ivar,	ofw_cpu_read_ivar),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
	DEVMETHOD(bus_alloc_resource,	bus_generic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),

	DEVMETHOD_END
};

static driver_t ofw_cpu_driver = {
	"cpu",
	ofw_cpu_methods,
	sizeof(struct ofw_cpu_softc)
};

DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, 0, 0);

/* Match nodes with device_type "cpu"; quiet all but the first unit. */
static int
ofw_cpu_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (type == NULL || strcmp(type, "cpu") != 0)
		return (ENXIO);

	device_set_desc(dev, "Open Firmware CPU");
	if (!bootverbose && device_get_unit(dev) != 0) {
		device_quiet(dev);
		device_quiet_children(dev);
	}

	return (0);
}

/*
 * Read and validate the CPU node's "reg" property, locate the matching
 * pcpu, and determine the nominal clock frequency (from the
 * "clock-frequency" property, or on arm/arm64/riscv from the clock
 * framework as a fallback).
 */
static int
ofw_cpu_attach(device_t dev)
{
	struct ofw_cpulist_softc *psc;
	struct ofw_cpu_softc *sc;
	phandle_t node;
	pcell_t cell;
	int rv;
#if
defined(__arm__) || defined(__arm64__) || defined(__riscv__)
	clk_t cpuclk;
	uint64_t freq;
#endif

	sc = device_get_softc(dev);
	psc = device_get_softc(device_get_parent(dev));

	if (nitems(sc->sc_reg) < psc->sc_addr_cells) {
		if (bootverbose)
			device_printf(dev, "Too many address cells\n");
		return (EINVAL);
	}

	node = ofw_bus_get_node(dev);

	/* Read and validate the reg property for use later */
	sc->sc_reg_valid = false;
	rv = OF_getencprop(node, "reg", sc->sc_reg, sizeof(sc->sc_reg));
	if (rv < 0)
		device_printf(dev, "missing 'reg' property\n");
	else if ((rv % 4) != 0) {
		if (bootverbose)
			device_printf(dev, "Malformed reg property\n");
	} else if ((rv / 4) != psc->sc_addr_cells) {
		if (bootverbose)
			device_printf(dev, "Invalid reg size %u\n", rv);
	} else
		sc->sc_reg_valid = true;

#ifdef __powerpc__
	/*
	 * On powerpc, "interrupt-servers" denotes a SMT CPU.  Look for any
	 * thread on this CPU, and assign that.
	 */
	if (OF_hasprop(node, "ibm,ppc-interrupt-server#s")) {
		struct cpuref cpuref;
		cell_t *servers;
		int i, nservers, rv;

		if ((nservers = OF_getencprop_alloc(node,
		    "ibm,ppc-interrupt-server#s", (void **)&servers)) < 0)
			return (ENXIO);
		nservers /= sizeof(cell_t);
		for (i = 0; i < nservers; i++) {
			for (rv = platform_smp_first_cpu(&cpuref); rv == 0;
			    rv = platform_smp_next_cpu(&cpuref)) {
				if (cpuref.cr_hwref == servers[i]) {
					sc->sc_cpu_pcpu =
					    pcpu_find(cpuref.cr_cpuid);
					if (sc->sc_cpu_pcpu == NULL) {
						OF_prop_free(servers);
						return (ENXIO);
					}
					break;
				}
			}
			if (rv != ENOENT)
				break;
		}
		OF_prop_free(servers);
		if (sc->sc_cpu_pcpu == NULL) {
			device_printf(dev, "No CPU found for this device.\n");
			return (ENXIO);
		}
	} else
#endif
	sc->sc_cpu_pcpu = pcpu_find(device_get_unit(dev));

	if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) < 0) {
#if defined(__arm__) || defined(__arm64__) || defined(__riscv__)
		rv = clk_get_by_ofw_index(dev, 0, 0, &cpuclk);
		if (rv == 0) {
			rv = clk_get_freq(cpuclk, &freq);
			/*
			 * NOTE(review): when clk_get_freq() fails and
			 * bootverbose is off, the else branch still runs and
			 * uses 'freq' uninitialized.  The error and success
			 * paths look conflated here — confirm this should be
			 * "if (rv != 0) { if (bootverbose) ... } else ...".
			 */
			if (rv != 0 && bootverbose)
				device_printf(dev,
				    "Cannot get freq of property clocks\n");
			else
				sc->sc_nominal_mhz = freq / 1000000;
		} else
#endif
		{
			if (bootverbose)
				device_printf(dev,
				    "missing 'clock-frequency' property\n");
		}
	} else
		sc->sc_nominal_mhz = cell / 1000000; /* convert to MHz */

	if (sc->sc_nominal_mhz != 0 && bootverbose)
		device_printf(dev, "Nominal frequency %dMhz\n",
		    sc->sc_nominal_mhz);
	bus_generic_probe(dev);
	return (bus_generic_attach(dev));
}

/*
 * Bus ivar accessor: exposes the pcpu pointer, nominal frequency, and
 * the raw OFW CPU id ("reg" cells) to children.
 */
static int
ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct ofw_cpulist_softc *psc;
	struct ofw_cpu_softc *sc;

	sc = device_get_softc(dev);

	switch (index) {
	case CPU_IVAR_PCPU:
		*result = (uintptr_t)sc->sc_cpu_pcpu;
		return (0);
	case CPU_IVAR_NOMINAL_MHZ:
		if (sc->sc_nominal_mhz > 0) {
			*result = (uintptr_t)sc->sc_nominal_mhz;
			return (0);
		}
		break;
	case CPU_IVAR_CPUID_SIZE:
		psc = device_get_softc(device_get_parent(dev));
		*result = psc->sc_addr_cells;
		return (0);
	case CPU_IVAR_CPUID:
		if (sc->sc_reg_valid) {
			*result = (uintptr_t)sc->sc_reg;
			return (0);
		}
		break;
	}

	return (ENOENT);
}

/*
 * Early (pre-newbus) walk over /cpus children with device_type "cpu",
 * invoking 'callback' with each CPU's index and "reg" cells.  With
 * only_runnable set, CPUs whose "status" is neither "okay"/"ok" and that
 * lack an "enable-method" are skipped, and the number of visited CPUs is
 * returned; otherwise the total CPU count is returned.  Returns -1 on
 * missing /cpus or #address-cells.
 */
int
ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, boolean_t only_runnable)
{
	phandle_t node, child;
	pcell_t addr_cells, reg[2];
	char status[16];
	char device_type[16];
	u_int id, next_id;
	int count, rv;

	count = 0;
	id = 0;
	next_id = 0;

	node = OF_finddevice("/cpus");
	if (node == -1)
		return (-1);

	/* Find the number of cells in the cpu register */
	if (OF_getencprop(node, "#address-cells", &addr_cells,
	    sizeof(addr_cells)) < 0)
		return (-1);

	for (child = OF_child(node); child != 0; child = OF_peer(child),
	    id = next_id) {
		/* Check if child is a CPU */
		memset(device_type, 0, sizeof(device_type));
		rv = OF_getprop(child, "device_type", device_type,
		    sizeof(device_type) - 1);
		if (rv < 0)
			continue;
		if (strcmp(device_type, "cpu") != 0)
			continue;

		/* We're processing CPU, update next_id used in the next iteration */
		next_id++;

		/*
		 * If we are filtering by runnable then limit to only
		 * those that have been enabled, or do provide a method
		 * to enable them.
		 */
		if (only_runnable) {
			status[0] = '\0';
			OF_getprop(child, "status", status, sizeof(status));
			if (status[0] != '\0' && strcmp(status, "okay") != 0 &&
			    strcmp(status, "ok") != 0 &&
			    !OF_hasprop(child, "enable-method"))
				continue;
		}

		/*
		 * Check we have a register to identify the cpu
		 */
		rv = OF_getencprop(child, "reg", reg,
		    addr_cells * sizeof(cell_t));
		if (rv != addr_cells * sizeof(cell_t))
			continue;

		if (callback == NULL || callback(id, child, addr_cells, reg))
			count++;
	}

	return (only_runnable ? count : id);
}
diff --git a/sys/dev/pci/pci_dw_mv.c b/sys/dev/pci/pci_dw_mv.c
index b37baf044e8b..4f0671cb23d9 100644
--- a/sys/dev/pci/pci_dw_mv.c
+++ b/sys/dev/pci/pci_dw_mv.c
@@ -1,327 +1,327 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Michal Meloun
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* Armada 8k DesignWare PCIe driver */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
-#include 
+#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#include "pcib_if.h"
#include "pci_dw_if.h"

/* Implementation-specific registers of the Armada 8k DW PCIe block. */
#define	MV_GLOBAL_CONTROL_REG		0x8000
#define	PCIE_APP_LTSSM_EN			(1 << 2)

#define	MV_GLOBAL_STATUS_REG		0x8008
#define	MV_STATUS_RDLH_LINK_UP			(1 << 1)
#define	MV_STATUS_PHY_LINK_UP			(1 << 9)

#define	MV_INT_CAUSE1			0x801C
#define	MV_INT_MASK1			0x8020
#define	INT_A_ASSERT_MASK			(1 << 9)
#define	INT_B_ASSERT_MASK			(1 << 10)
#define	INT_C_ASSERT_MASK			(1 << 11)
#define	INT_D_ASSERT_MASK			(1 << 12)

#define	MV_INT_CAUSE2			0x8024
#define	MV_INT_MASK2			0x8028
#define	MV_ERR_INT_CAUSE		0x802C
#define	MV_ERR_INT_MASK			0x8030

#define	MV_ARCACHE_TRC_REG		0x8050
#define	MV_AWCACHE_TRC_REG		0x8054
#define	MV_ARUSER_REG			0x805C
#define	MV_AWUSER_REG			0x8060

#define	MV_MAX_LANES	8
struct pci_mv_softc {
	struct pci_dw_softc	dw_sc;	/* must be first: DW core state */
	device_t		dev;
	phandle_t		node;
	struct resource		*irq_res;
	void			*intr_cookie;
	phy_t			phy[MV_MAX_LANES];
	clk_t			clk_core;
	clk_t			clk_reg;
};

/* Compatible devices. */
static struct ofw_compat_data compat_data[] = {
	{"marvell,armada8k-pcie",	1},
	{NULL,		 		0},
};

/*
 * Get and enable one PHY per possible lane.  A missing phy driver is
 * tolerated (see XXX below); any other failure releases all PHYs and
 * returns the error.
 */
static int
pci_mv_phy_init(struct pci_mv_softc *sc)
{
	int i, rv;

	for (i = 0; i < MV_MAX_LANES; i++) {
		rv =  phy_get_by_ofw_idx(sc->dev, sc->node, i, &(sc->phy[i]));
		if (rv != 0 && rv != ENOENT) {
			device_printf(sc->dev, "Cannot get phy[%d]\n", i);
			/* XXX revert when phy driver will be implemented */
#if 0
			goto fail;
#else
			continue;
#endif
		}
		if (sc->phy[i] == NULL)
			continue;
		rv = phy_enable(sc->phy[i]);
		if (rv != 0) {
			device_printf(sc->dev, "Cannot enable phy[%d]\n", i);
			goto fail;
		}
	}
	return (0);

fail:
	for (i = 0; i < MV_MAX_LANES; i++) {
		if (sc->phy[i] == NULL)
			continue;
		phy_release(sc->phy[i]);
	}
	return (rv);
}

/*
 * One-time controller setup: put the core into Root Complex mode, set
 * AXI master transaction attributes, and configure/acknowledge the
 * interrupt mask and cause registers.
 */
static void
pci_mv_init(struct pci_mv_softc *sc)
{
	uint32_t reg;

	/* Set device configuration to RC */
	reg = pci_dw_dbi_rd4(sc->dev, MV_GLOBAL_CONTROL_REG);
	reg &= ~0x000000F0;
	reg |= 0x000000040;
	pci_dw_dbi_wr4(sc->dev, MV_GLOBAL_CONTROL_REG, reg);

	/* AxCache master transaction attributes */
	pci_dw_dbi_wr4(sc->dev, MV_ARCACHE_TRC_REG, 0x3511);
	pci_dw_dbi_wr4(sc->dev, MV_AWCACHE_TRC_REG, 0x5311);

	/* AxDomain master transaction attributes */
	pci_dw_dbi_wr4(sc->dev, MV_ARUSER_REG, 0x0002);
	pci_dw_dbi_wr4(sc->dev, MV_AWUSER_REG, 0x0002);

	/* Enable all INTx interrupt (virtual) pins */
	reg = pci_dw_dbi_rd4(sc->dev, MV_INT_MASK1);
	reg |= INT_A_ASSERT_MASK | INT_B_ASSERT_MASK |
	    INT_C_ASSERT_MASK | INT_D_ASSERT_MASK;
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, reg);

	/* Enable local interrupts */
	pci_dw_dbi_wr4(sc->dev, DW_MSI_INTR0_MASK, 0xFFFFFFFF);
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, 0x0001FE00);
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK2, 0x00000000);
	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, 0xFFFFFFFF);
	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, 0xFFFFFFFF);

	/* Errors have own interrupt, not yet populated in DT */
	pci_dw_dbi_wr4(sc->dev, MV_ERR_INT_MASK, 0);
}

/* Interrupt filter: acknowledge all pending local interrupt causes. */
static int
pci_mv_intr(void *arg)
{
	struct pci_mv_softc *sc = arg;
	uint32_t cause1, cause2;

	/* Ack all interrupts */
	cause1 =
pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE1); cause2 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE2); pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, cause1); pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, cause2); return (FILTER_HANDLED); } static int pci_mv_get_link(device_t dev, bool *status) { uint32_t reg; reg = pci_dw_dbi_rd4(dev, MV_GLOBAL_STATUS_REG); if ((reg & (MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP)) == (MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP)) *status = true; else *status = false; return (0); } static int pci_mv_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Marvell Armada8K PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int pci_mv_attach(device_t dev) { struct resource_map_request req; struct resource_map map; struct pci_mv_softc *sc; phandle_t node; int rv; int rid; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->dev = dev; sc->node = node; rid = 0; sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED); if (sc->dw_sc.dbi_res == NULL) { device_printf(dev, "Cannot allocate DBI memory\n"); rv = ENXIO; goto out; } resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->dw_sc.dbi_res, &req, &map); if (rv != 0) { device_printf(dev, "could not map memory.\n"); return (rv); } rman_set_mapping(sc->dw_sc.dbi_res, &map); /* PCI interrupt */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); rv = ENXIO; goto out; } /* Clocks */ rv = clk_get_by_ofw_name(sc->dev, 0, "core", &sc->clk_core); if (rv != 0) { device_printf(sc->dev, "Cannot get 'core' clock\n"); rv = ENXIO; goto out; } rv = clk_get_by_ofw_name(sc->dev, 0, "reg", &sc->clk_reg); if (rv != 0) { device_printf(sc->dev, "Cannot get 'reg' clock\n"); rv = 
ENXIO; goto out; } rv = clk_enable(sc->clk_core); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'core' clock\n"); rv = ENXIO; goto out; } rv = clk_enable(sc->clk_reg); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'reg' clock\n"); rv = ENXIO; goto out; } rv = pci_mv_phy_init(sc); if (rv) goto out; rv = pci_dw_init(dev); if (rv != 0) goto out; pci_mv_init(sc); /* Setup interrupt */ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, pci_mv_intr, NULL, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); rv = ENXIO; goto out; } return (bus_generic_attach(dev)); out: /* XXX Cleanup */ return (rv); } static device_method_t pci_mv_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_mv_probe), DEVMETHOD(device_attach, pci_mv_attach), DEVMETHOD(pci_dw_get_link, pci_mv_get_link), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, pci_mv_driver, pci_mv_methods, sizeof(struct pci_mv_softc), pci_dw_driver); DRIVER_MODULE( pci_mv, simplebus, pci_mv_driver, NULL, NULL); diff --git a/sys/dev/pwm/controller/allwinner/aw_pwm.c b/sys/dev/pwm/controller/allwinner/aw_pwm.c index 85ccf4352423..8d7e38834fdd 100644 --- a/sys/dev/pwm/controller/allwinner/aw_pwm.c +++ b/sys/dev/pwm/controller/allwinner/aw_pwm.c @@ -1,398 +1,398 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include "pwmbus_if.h" #define AW_PWM_CTRL 0x00 #define AW_PWM_CTRL_PRESCALE_MASK 0xF #define AW_PWM_CTRL_EN (1 << 4) #define AW_PWM_CTRL_ACTIVE_LEVEL_HIGH (1 << 5) #define AW_PWM_CTRL_GATE (1 << 6) #define AW_PWM_CTRL_MODE_MASK 0x80 #define AW_PWM_CTRL_PULSE_MODE (1 << 7) #define AW_PWM_CTRL_CYCLE_MODE (0 << 7) #define AW_PWM_CTRL_PULSE_START (1 << 8) #define AW_PWM_CTRL_CLK_BYPASS (1 << 9) #define AW_PWM_CTRL_PERIOD_BUSY (1 << 28) #define AW_PWM_PERIOD 0x04 #define AW_PWM_PERIOD_TOTAL_MASK 0xFFFF #define AW_PWM_PERIOD_TOTAL_SHIFT 16 #define AW_PWM_PERIOD_ACTIVE_MASK 0xFFFF #define AW_PWM_PERIOD_ACTIVE_SHIFT 0 #define AW_PWM_MAX_FREQ 24000000 #define NS_PER_SEC 1000000000 static struct ofw_compat_data compat_data[] = { { "allwinner,sun5i-a13-pwm", 1 }, { "allwinner,sun8i-h3-pwm", 1 }, { NULL, 0 } }; static struct resource_spec aw_pwm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct aw_pwm_softc { device_t dev; device_t busdev; clk_t clk; struct resource *res; uint64_t clk_freq; unsigned int period; unsigned int duty; uint32_t flags; bool enabled; }; static uint32_t aw_pwm_clk_prescaler[] = { 120, 180, 240, 360, 480, 0, 0, 
0, 12000, 24000, 36000, 48000, 72000, 0, 0, 1, }; #define AW_PWM_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define AW_PWM_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int aw_pwm_probe(device_t dev); static int aw_pwm_attach(device_t dev); static int aw_pwm_detach(device_t dev); static int aw_pwm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner PWM"); return (BUS_PROBE_DEFAULT); } static int aw_pwm_attach(device_t dev) { struct aw_pwm_softc *sc; uint64_t clk_freq; uint32_t reg; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (error != 0) { device_printf(dev, "cannot get clock\n"); goto fail; } error = clk_enable(sc->clk); if (error != 0) { device_printf(dev, "cannot enable clock\n"); goto fail; } error = clk_get_freq(sc->clk, &sc->clk_freq); if (error != 0) { device_printf(dev, "cannot get clock frequency\n"); goto fail; } if (bus_alloc_resources(dev, aw_pwm_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } /* Read the configuration left by U-Boot */ reg = AW_PWM_READ(sc, AW_PWM_CTRL); if (reg & (AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN)) sc->enabled = true; reg = AW_PWM_READ(sc, AW_PWM_CTRL); reg &= AW_PWM_CTRL_PRESCALE_MASK; if (reg > nitems(aw_pwm_clk_prescaler)) { device_printf(dev, "Bad prescaler %x, cannot guess current settings\n", reg); goto skipcfg; } clk_freq = sc->clk_freq / aw_pwm_clk_prescaler[reg]; reg = AW_PWM_READ(sc, AW_PWM_PERIOD); sc->period = NS_PER_SEC / (clk_freq / ((reg >> AW_PWM_PERIOD_TOTAL_SHIFT) & AW_PWM_PERIOD_TOTAL_MASK)); sc->duty = NS_PER_SEC / (clk_freq / ((reg >> AW_PWM_PERIOD_ACTIVE_SHIFT) & AW_PWM_PERIOD_ACTIVE_MASK)); skipcfg: /* * Note that we don't check for failure to attach pwmbus -- even without * it we can still service clients who connect via fdt 
xref data. */ node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); sc->busdev = device_add_child(dev, "pwmbus", -1); return (bus_generic_attach(dev)); fail: aw_pwm_detach(dev); return (error); } static int aw_pwm_detach(device_t dev) { struct aw_pwm_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(sc->dev)) != 0) { device_printf(sc->dev, "cannot detach child devices\n"); return (error); } if (sc->busdev != NULL) device_delete_child(dev, sc->busdev); if (sc->res != NULL) bus_release_resources(dev, aw_pwm_spec, &sc->res); return (0); } static phandle_t aw_pwm_get_node(device_t bus, device_t dev) { /* * Share our controller node with our pwmbus child; it instantiates * devices by walking the children contained within our node. */ return ofw_bus_get_node(bus); } static int aw_pwm_channel_count(device_t dev, u_int *nchannel) { *nchannel = 1; return (0); } static int aw_pwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty) { struct aw_pwm_softc *sc; uint64_t period_freq, duty_freq; uint64_t clk_rate, div; uint32_t reg; int prescaler; int i; sc = device_get_softc(dev); period_freq = NS_PER_SEC / period; if (period_freq > AW_PWM_MAX_FREQ) return (EINVAL); /* * FIXME. The hardware is capable of sub-Hz frequencies, that is, * periods longer than a second. But the current code cannot deal * with those properly. */ if (period_freq == 0) return (EINVAL); /* * FIXME. There is a great loss of precision when the period and the * duty are near 1 second. In some cases period_freq and duty_freq can * be equal even if the period and the duty are significantly different. 
*/ duty_freq = NS_PER_SEC / duty; if (duty_freq < period_freq) { device_printf(sc->dev, "duty < period\n"); return (EINVAL); } /* First test without prescaler */ clk_rate = AW_PWM_MAX_FREQ; prescaler = AW_PWM_CTRL_PRESCALE_MASK; div = AW_PWM_MAX_FREQ / period_freq; if ((div - 1) > AW_PWM_PERIOD_TOTAL_MASK) { /* Test all prescaler */ for (i = 0; i < nitems(aw_pwm_clk_prescaler); i++) { if (aw_pwm_clk_prescaler[i] == 0) continue; div = AW_PWM_MAX_FREQ / aw_pwm_clk_prescaler[i] / period_freq; if ((div - 1) < AW_PWM_PERIOD_TOTAL_MASK ) { prescaler = i; clk_rate = AW_PWM_MAX_FREQ / aw_pwm_clk_prescaler[i]; break; } } if (prescaler == AW_PWM_CTRL_PRESCALE_MASK) return (EINVAL); } reg = AW_PWM_READ(sc, AW_PWM_CTRL); /* Write the prescalar */ reg &= ~AW_PWM_CTRL_PRESCALE_MASK; reg |= prescaler; reg &= ~AW_PWM_CTRL_MODE_MASK; reg |= AW_PWM_CTRL_CYCLE_MODE; reg &= ~AW_PWM_CTRL_PULSE_START; reg &= ~AW_PWM_CTRL_CLK_BYPASS; AW_PWM_WRITE(sc, AW_PWM_CTRL, reg); /* Write the total/active cycles */ reg = ((clk_rate / period_freq - 1) << AW_PWM_PERIOD_TOTAL_SHIFT) | ((clk_rate / duty_freq) << AW_PWM_PERIOD_ACTIVE_SHIFT); AW_PWM_WRITE(sc, AW_PWM_PERIOD, reg); sc->period = period; sc->duty = duty; return (0); } static int aw_pwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty) { struct aw_pwm_softc *sc; sc = device_get_softc(dev); *period = sc->period; *duty = sc->duty; return (0); } static int aw_pwm_channel_enable(device_t dev, u_int channel, bool enable) { struct aw_pwm_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (enable && sc->enabled) return (0); reg = AW_PWM_READ(sc, AW_PWM_CTRL); if (enable) reg |= AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN; else reg &= ~(AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN); AW_PWM_WRITE(sc, AW_PWM_CTRL, reg); sc->enabled = enable; return (0); } static int aw_pwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled) { struct aw_pwm_softc *sc; sc = device_get_softc(dev); *enabled = sc->enabled; return (0); } static 
device_method_t aw_pwm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_pwm_probe), DEVMETHOD(device_attach, aw_pwm_attach), DEVMETHOD(device_detach, aw_pwm_detach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, aw_pwm_get_node), /* pwmbus interface */ DEVMETHOD(pwmbus_channel_count, aw_pwm_channel_count), DEVMETHOD(pwmbus_channel_config, aw_pwm_channel_config), DEVMETHOD(pwmbus_channel_get_config, aw_pwm_channel_get_config), DEVMETHOD(pwmbus_channel_enable, aw_pwm_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, aw_pwm_channel_is_enabled), DEVMETHOD_END }; static driver_t aw_pwm_driver = { "pwm", aw_pwm_methods, sizeof(struct aw_pwm_softc), }; DRIVER_MODULE(aw_pwm, simplebus, aw_pwm_driver, 0, 0); MODULE_VERSION(aw_pwm, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/pwm/controller/rockchip/rk_pwm.c b/sys/dev/pwm/controller/rockchip/rk_pwm.c index 260899f15d89..f1f3946e4d59 100644 --- a/sys/dev/pwm/controller/rockchip/rk_pwm.c +++ b/sys/dev/pwm/controller/rockchip/rk_pwm.c @@ -1,397 +1,397 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * Copyright (c) 2019 Brandon Bergren * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include "pwmbus_if.h" /* Register offsets. */ #define RK_PWM_COUNTER 0x00 #define RK_PWM_PERIOD 0x04 #define RK_PWM_DUTY 0x08 #define RK_PWM_CTRL 0x0c #define SET(reg,mask,val) reg = ((reg & ~mask) | val) #define RK_PWM_CTRL_ENABLE_MASK (1 << 0) #define RK_PWM_CTRL_ENABLED (1 << 0) #define RK_PWM_CTRL_DISABLED (0) #define RK_PWM_CTRL_MODE_MASK (3 << 1) #define RK_PWM_CTRL_MODE_ONESHOT (0) #define RK_PWM_CTRL_MODE_CONTINUOUS (1 << 1) #define RK_PWM_CTRL_MODE_CAPTURE (1 << 2) #define RK_PWM_CTRL_DUTY_MASK (1 << 3) #define RK_PWM_CTRL_DUTY_POSITIVE (1 << 3) #define RK_PWM_CTRL_DUTY_NEGATIVE (0) #define RK_PWM_CTRL_INACTIVE_MASK (1 << 4) #define RK_PWM_CTRL_INACTIVE_POSITIVE (1 << 4) #define RK_PWM_CTRL_INACTIVE_NEGATIVE (0) /* PWM Output Alignment */ #define RK_PWM_CTRL_ALIGN_MASK (1 << 5) #define RK_PWM_CTRL_ALIGN_CENTER (1 << 5) #define RK_PWM_CTRL_ALIGN_LEFT (0) /* Low power mode: disable prescaler when inactive */ #define RK_PWM_CTRL_LP_MASK (1 << 8) #define RK_PWM_CTRL_LP_ENABLE (1 << 8) #define RK_PWM_CTRL_LP_DISABLE (0) /* Clock source: bypass the scaler or not */ #define RK_PWM_CTRL_CLOCKSRC_MASK (1 << 9) #define RK_PWM_CTRL_CLOCKSRC_NONSCALED (0) #define RK_PWM_CTRL_CLOCKSRC_SCALED (1 << 9) #define RK_PWM_CTRL_PRESCALE_MASK (7 << 12) #define RK_PWM_CTRL_PRESCALE_SHIFT 12 #define RK_PWM_CTRL_SCALE_MASK (0xFF << 16) #define RK_PWM_CTRL_SCALE_SHIFT 16 
#define RK_PWM_CTRL_REPEAT_MASK (0xFF << 24) #define RK_PWM_CTRL_REPEAT_SHIFT 24 #define NS_PER_SEC 1000000000 static struct ofw_compat_data compat_data[] = { { "rockchip,rk3288-pwm", 1 }, { "rockchip,rk3399-pwm", 1 }, { NULL, 0 } }; static struct resource_spec rk_pwm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct rk_pwm_softc { device_t dev; device_t busdev; clk_t clk; struct resource *res; uint64_t clk_freq; unsigned int period; unsigned int duty; uint32_t flags; uint8_t prescaler; uint8_t scaler; bool using_scaler; bool enabled; }; #define RK_PWM_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define RK_PWM_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int rk_pwm_probe(device_t dev); static int rk_pwm_attach(device_t dev); static int rk_pwm_detach(device_t dev); static int rk_pwm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Rockchip PWM"); return (BUS_PROBE_DEFAULT); } static int rk_pwm_attach(device_t dev) { struct rk_pwm_softc *sc; phandle_t node; uint64_t clk_freq; uint32_t reg; int error; sc = device_get_softc(dev); sc->dev = dev; error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (error != 0) { device_printf(dev, "cannot get clock\n"); goto fail; } error = clk_enable(sc->clk); if (error != 0) { device_printf(dev, "cannot enable clock\n"); goto fail; } error = clk_get_freq(sc->clk, &sc->clk_freq); if (error != 0) { device_printf(dev, "cannot get base frequency\n"); goto fail; } if (bus_alloc_resources(dev, rk_pwm_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } /* Read the configuration left by U-Boot */ reg = RK_PWM_READ(sc, RK_PWM_CTRL); if ((reg & RK_PWM_CTRL_ENABLE_MASK) == RK_PWM_CTRL_ENABLED) sc->enabled = true; reg = RK_PWM_READ(sc, RK_PWM_CTRL); reg &= RK_PWM_CTRL_PRESCALE_MASK; sc->prescaler = reg >> RK_PWM_CTRL_PRESCALE_SHIFT; reg 
= RK_PWM_READ(sc, RK_PWM_CTRL); reg &= RK_PWM_CTRL_SCALE_MASK; sc->scaler = reg >> RK_PWM_CTRL_SCALE_SHIFT; reg = RK_PWM_READ(sc, RK_PWM_CTRL); if ((reg & RK_PWM_CTRL_CLOCKSRC_MASK) == RK_PWM_CTRL_CLOCKSRC_SCALED) sc->using_scaler = true; else sc->using_scaler = false; clk_freq = sc->clk_freq / (2 ^ sc->prescaler); if (sc->using_scaler) { if (sc->scaler == 0) clk_freq /= 512; else clk_freq /= (sc->scaler * 2); } reg = RK_PWM_READ(sc, RK_PWM_PERIOD); sc->period = NS_PER_SEC / (clk_freq / reg); reg = RK_PWM_READ(sc, RK_PWM_DUTY); sc->duty = NS_PER_SEC / (clk_freq / reg); node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); sc->busdev = device_add_child(dev, "pwmbus", -1); return (bus_generic_attach(dev)); fail: rk_pwm_detach(dev); return (error); } static int rk_pwm_detach(device_t dev) { struct rk_pwm_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); bus_release_resources(dev, rk_pwm_spec, &sc->res); return (0); } static phandle_t aw_pwm_get_node(device_t bus, device_t dev) { /* * Share our controller node with our pwmbus child; it instantiates * devices by walking the children contained within our node. */ return ofw_bus_get_node(bus); } static int rk_pwm_channel_count(device_t dev, u_int *nchannel) { /* The device supports 4 channels, but attaches multiple times in the * device tree. This interferes with advanced usage though, as * the interrupt capability and channel 3 FIFO register offsets * don't work right in this situation. * But since we don't support those yet, pretend we are singlechannel. */ *nchannel = 1; return (0); } static int rk_pwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty) { struct rk_pwm_softc *sc; uint64_t period_freq, duty_freq; uint32_t reg; uint32_t period_out; uint32_t duty_out; uint8_t prescaler; uint8_t scaler; bool using_scaler; sc = device_get_softc(dev); period_freq = NS_PER_SEC / period; /* Datasheet doesn't define, so use Nyquist frequency. 
*/ if (period_freq > (sc->clk_freq / 2)) return (EINVAL); duty_freq = NS_PER_SEC / duty; if (duty_freq < period_freq) { device_printf(sc->dev, "duty < period\n"); return (EINVAL); } /* Assuming 24 MHz reference, we should never actually have to use the divider due to pwm API limitations. */ prescaler = 0; scaler = 0; using_scaler = false; /* XXX Expand API to allow for 64 bit period/duty. */ period_out = (sc->clk_freq * period) / NS_PER_SEC; duty_out = (sc->clk_freq * duty) / NS_PER_SEC; reg = RK_PWM_READ(sc, RK_PWM_CTRL); if ((reg & RK_PWM_CTRL_MODE_MASK) != RK_PWM_CTRL_MODE_CONTINUOUS) { /* Switching modes, disable just in case. */ SET(reg, RK_PWM_CTRL_ENABLE_MASK, RK_PWM_CTRL_DISABLED); RK_PWM_WRITE(sc, RK_PWM_CTRL, reg); } RK_PWM_WRITE(sc, RK_PWM_PERIOD, period_out); RK_PWM_WRITE(sc, RK_PWM_DUTY, duty_out); SET(reg, RK_PWM_CTRL_ENABLE_MASK, RK_PWM_CTRL_ENABLED); SET(reg, RK_PWM_CTRL_MODE_MASK, RK_PWM_CTRL_MODE_CONTINUOUS); SET(reg, RK_PWM_CTRL_ALIGN_MASK, RK_PWM_CTRL_ALIGN_LEFT); SET(reg, RK_PWM_CTRL_CLOCKSRC_MASK, using_scaler); SET(reg, RK_PWM_CTRL_PRESCALE_MASK, prescaler << RK_PWM_CTRL_PRESCALE_SHIFT); SET(reg, RK_PWM_CTRL_SCALE_MASK, scaler << RK_PWM_CTRL_SCALE_SHIFT); RK_PWM_WRITE(sc, RK_PWM_CTRL, reg); sc->period = period; sc->duty = duty; return (0); } static int rk_pwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty) { struct rk_pwm_softc *sc; sc = device_get_softc(dev); *period = sc->period; *duty = sc->duty; return (0); } static int rk_pwm_channel_enable(device_t dev, u_int channel, bool enable) { struct rk_pwm_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (enable && sc->enabled) return (0); reg = RK_PWM_READ(sc, RK_PWM_CTRL); SET(reg, RK_PWM_CTRL_ENABLE_MASK, enable); RK_PWM_WRITE(sc, RK_PWM_CTRL, reg); sc->enabled = enable; return (0); } static int rk_pwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled) { struct rk_pwm_softc *sc; sc = device_get_softc(dev); *enabled = sc->enabled; return (0); } 
static device_method_t rk_pwm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_pwm_probe), DEVMETHOD(device_attach, rk_pwm_attach), DEVMETHOD(device_detach, rk_pwm_detach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, aw_pwm_get_node), /* pwm interface */ DEVMETHOD(pwmbus_channel_count, rk_pwm_channel_count), DEVMETHOD(pwmbus_channel_config, rk_pwm_channel_config), DEVMETHOD(pwmbus_channel_get_config, rk_pwm_channel_get_config), DEVMETHOD(pwmbus_channel_enable, rk_pwm_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, rk_pwm_channel_is_enabled), DEVMETHOD_END }; static driver_t rk_pwm_driver = { "pwm", rk_pwm_methods, sizeof(struct rk_pwm_softc), }; DRIVER_MODULE(rk_pwm, simplebus, rk_pwm_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/qcom_clk/qcom_clk_apssdiv.c b/sys/dev/qcom_clk/qcom_clk_apssdiv.c index d33134f0ba86..52f6bfa9945f 100644 --- a/sys/dev/qcom_clk/qcom_clk_apssdiv.c +++ b/sys/dev/qcom_clk/qcom_clk_apssdiv.c @@ -1,281 +1,281 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_freqtbl.h" #include "qcom_clk_apssdiv.h" #include "clkdev_if.h" /* * This is a combination gate, divisor/PLL configuration * for the APSS CPU clock. */ #if 0 #define DPRINTF(dev, msg...) device_printf(dev, "cpufreq_dt: " msg); #else #define DPRINTF(dev, msg...) #endif struct qcom_clk_apssdiv_sc { struct clknode *clknode; uint32_t div_offset; uint32_t div_width; uint32_t div_shift; uint32_t enable_offset; uint32_t enable_shift; const struct qcom_clk_freq_tbl *freq_tbl; }; static uint64_t qcom_clk_apssdiv_calc_rate(struct clknode *clk, uint64_t freq, uint32_t cdiv) { uint32_t pre_div; /* * The divisor isn't a linear map with a linear pre-divisor. */ if (cdiv > 10) { pre_div = (cdiv + 1) * 2; } else { pre_div = cdiv + 12; } /* * Multiplier is a fixed "2" here. 
*/ return (freq * 2L) / pre_div; } static int qcom_clk_apssdiv_recalc(struct clknode *clk, uint64_t *freq) { struct qcom_clk_apssdiv_sc *sc; uint32_t reg, cdiv; sc = clknode_get_softc(clk); if (freq == NULL || *freq == 0) { printf("%s: called; NULL or 0 frequency\n", __func__); return (ENXIO); } CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->div_offset, ®); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); cdiv = (reg >> sc->div_shift) & ((1U << sc->div_width) - 1); DPRINTF(clknode_get_device(sc->clknode), "%s: called; cdiv=0x%x, freq=%llu\n", __func__, cdiv, *freq); *freq = qcom_clk_apssdiv_calc_rate(clk, *freq, cdiv); DPRINTF(clknode_get_device(sc->clknode), "%s: called; freq is %llu\n", __func__, *freq); return (0); } #if 0 static bool qcom_clk_apssdiv_get_gate_locked(struct qcom_clk_apssdiv_sc *sc) { uint32_t reg; if (sc->enable_offset == 0) return (false); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->enable_offset, ®); return (!! (reg & (1U << sc->enable_shift))); } #endif static int qcom_clk_apssdiv_init(struct clknode *clk, device_t dev) { /* * There's only a single parent here for an fixed divisor, * so just set it to 0; the caller doesn't need to supply it. * * Note that the freqtbl entries have an upstream clock, * but the APSS div/gate only has a single upstream and we * don't program anything else specific in here. 
*/ clknode_init_parent_idx(clk, 0); return (0); } static int qcom_clk_apssdiv_set_gate(struct clknode *clk, bool enable) { struct qcom_clk_apssdiv_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); if (sc->enable_offset == 0) { return (ENXIO); } DPRINTF(clknode_get_device(sc->clknode), "%s: called; enable=%d\n", __func__, enable); CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->enable_offset, ®); if (enable) { reg |= (1U << sc->enable_shift); } else { reg &= ~(1U << sc->enable_shift); } CLKDEV_WRITE_4(clknode_get_device(sc->clknode), sc->enable_offset, reg); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); return (0); } /* * Set frequency * * fin - the parent frequency, if exists * fout - starts as the requested frequency, ends with the configured * or dry-run frequency * Flags - CLK_SET_DRYRUN, CLK_SET_ROUND_UP, CLK_SET_ROUND_DOWN * retval - 0, ERANGE */ static int qcom_clk_apssdiv_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { const struct qcom_clk_freq_tbl *f; struct qcom_clk_apssdiv_sc *sc; uint64_t f_freq; uint32_t reg; sc = clknode_get_softc(clk); /* There are no further PLLs to set in this chain */ *stop = 1; /* Search the table for a suitable frequency */ f = qcom_clk_freq_tbl_lookup(sc->freq_tbl, *fout); if (f == NULL) { return (ERANGE); } /* * Calculate what the resultant frequency would be based on the * parent PLL. */ f_freq = qcom_clk_apssdiv_calc_rate(clk, fin, f->pre_div); DPRINTF(clknode_get_device(sc->clknode), "%s: dryrun: %d, fin=%llu fout=%llu f_freq=%llu pre_div=%u" " target_freq=%llu\n", __func__, !! (flags & CLK_SET_DRYRUN), fin, *fout, f_freq, f->pre_div, f->freq); if (flags & CLK_SET_DRYRUN) { *fout = f_freq; return (0); } /* * Program in the new pre-divisor. 
*/ CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->div_offset, ®); reg &= ~(((1U << sc->div_width) - 1) << sc->div_shift); reg |= (f->pre_div << sc->div_shift); CLKDEV_WRITE_4(clknode_get_device(sc->clknode), sc->div_offset, reg); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); /* * The linux driver notes there's no status/completion bit to poll. * So sleep for a bit and hope that's enough time for it to * settle. */ DELAY(1); *fout = f_freq; return (0); } static clknode_method_t qcom_clk_apssdiv_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_apssdiv_init), CLKNODEMETHOD(clknode_recalc_freq, qcom_clk_apssdiv_recalc), CLKNODEMETHOD(clknode_set_gate, qcom_clk_apssdiv_set_gate), CLKNODEMETHOD(clknode_set_freq, qcom_clk_apssdiv_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_apssdiv, qcom_clk_apssdiv_class, qcom_clk_apssdiv_methods, sizeof(struct qcom_clk_apssdiv_sc), clknode_class); int qcom_clk_apssdiv_register(struct clkdom *clkdom, struct qcom_clk_apssdiv_def *clkdef) { struct clknode *clk; struct qcom_clk_apssdiv_sc *sc; clk = clknode_create(clkdom, &qcom_clk_apssdiv_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->div_offset = clkdef->div_offset; sc->div_width = clkdef->div_width; sc->div_shift = clkdef->div_shift; sc->freq_tbl = clkdef->freq_tbl; sc->enable_offset = clkdef->enable_offset; sc->enable_shift = clkdef->enable_shift; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_clk/qcom_clk_branch2.c b/sys/dev/qcom_clk/qcom_clk_branch2.c index 7cbd9a2b73e9..dac9d83ddfbb 100644 --- a/sys/dev/qcom_clk/qcom_clk_branch2.c +++ b/sys/dev/qcom_clk/qcom_clk_branch2.c @@ -1,287 +1,287 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_branch2.h" #include "qcom_clk_branch2_reg.h" #include "clkdev_if.h" /* * This is a combination gate/status and dynamic hardware clock gating with * voting. */ #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg); #else #define DPRINTF(dev, msg...) 
#endif

/*
 * Per-instance state for a branch2 clock node (gate/status with
 * optional hardware clock gating and halt-status voting).
 */
struct qcom_clk_branch2_sc {
	struct clknode	*clknode;	/* owning clknode */
	uint32_t	flags;		/* QCOM_CLK_BRANCH2_FLAGS_* */
	uint32_t	enable_offset;	/* enable register offset */
	uint32_t	enable_shift;	/* enable bit position */
	uint32_t	hwcg_reg;	/* hw clock gate register; 0 = none */
	uint32_t	hwcg_bit;	/* hw clock gate bit position */
	uint32_t	halt_reg;	/* halt status register offset */
	uint32_t	halt_check_type; /* QCOM_CLK_BRANCH2_BRANCH_HALT_* */
	bool		halt_check_voted; /* voted-style halt check */
};

#if 0
/*
 * Read back the current gate bit.  Currently unused (compiled out).
 * Caller holds the device lock (per the _locked naming).
 */
static bool
qcom_clk_branch2_get_gate_locked(struct qcom_clk_branch2_sc *sc)
{
	uint32_t reg;

	CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->enable_offset,
	    &reg);

	DPRINTF(clknode_get_device(sc->clknode),
	    "%s: offset=0x%x, reg=0x%x\n", __func__,
	    sc->enable_offset, reg);

	return (!! (reg & (1U << sc->enable_shift)));
}
#endif

/*
 * clknode init method: there's only a single parent, so always
 * select parent index 0.
 */
static int
qcom_clk_branch2_init(struct clknode *clk, device_t dev)
{

	clknode_init_parent_idx(clk, 0);
	return (0);
}

/*
 * Return true if this branch is under hardware clock-gating control
 * (i.e. the hwcg bit is set in the hwcg register).  Returns false
 * when no hwcg register is configured.
 * Caller holds the device lock (per the _locked naming).
 */
static bool
qcom_clk_branch2_in_hwcg_mode_locked(struct qcom_clk_branch2_sc *sc)
{
	uint32_t reg;

	if (sc->hwcg_reg == 0)
		return (false);

	CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->hwcg_reg,
	    &reg);

	return (!! (reg & (1U << sc->hwcg_bit)));
}

/*
 * Check whether the halt/status register agrees with the requested
 * gate state: for enable, CLK_OFF must be clear; for disable, CLK_OFF
 * must be set.  Caller holds the device lock (per the _locked naming).
 */
static bool
qcom_clk_branch2_check_halt_locked(struct qcom_clk_branch2_sc *sc,
    bool enable)
{
	uint32_t reg;

	CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->halt_reg, &reg);

	if (enable) {
		/*
		 * The upstream Linux code is .. unfortunate.
		 *
		 * Here it says "return true if BRANCH_CLK_OFF is not set,
		 * or if the status field = FSM_STATUS_ON AND
		 * the clk_off field is 0.
		 *
		 * Which .. is weird, because I can't currently see
		 * how we'd ever need to check FSM_STATUS_ON - the only
		 * valid check for the FSM status also requires clk_off=0.
		 */
		return !! ((reg & QCOM_CLK_BRANCH2_CLK_OFF) == 0);
	} else {
		return !! (reg & QCOM_CLK_BRANCH2_CLK_OFF);
	}
}

/*
 * Check if the given type/voted flag match what is configured. 
*/ static bool qcom_clk_branch2_halt_check_type(struct qcom_clk_branch2_sc *sc, uint32_t type, bool voted) { return ((sc->halt_check_type == type) && (sc->halt_check_voted == voted)); } static bool qcom_clk_branch2_wait_locked(struct qcom_clk_branch2_sc *sc, bool enable) { if (qcom_clk_branch2_halt_check_type(sc, QCOM_CLK_BRANCH2_BRANCH_HALT_SKIP, false)) return (true); if (qcom_clk_branch2_in_hwcg_mode_locked(sc)) return (true); if ((qcom_clk_branch2_halt_check_type(sc, QCOM_CLK_BRANCH2_BRANCH_HALT_DELAY, false)) || (enable == false && sc->halt_check_voted)) { DELAY(10); return (true); } if ((qcom_clk_branch2_halt_check_type(sc, QCOM_CLK_BRANCH2_BRANCH_HALT_INVERTED, false)) || (qcom_clk_branch2_halt_check_type(sc, QCOM_CLK_BRANCH2_BRANCH_HALT, false)) || (enable && sc->halt_check_voted)) { int count; for (count = 0; count < 200; count++) { if (qcom_clk_branch2_check_halt_locked(sc, enable)) return (true); DELAY(1); } DPRINTF(clknode_get_device(sc->clknode), "%s: enable stuck (%d)!\n", __func__, enable); return (false); } /* Default */ return (true); } static int qcom_clk_branch2_set_gate(struct clknode *clk, bool enable) { struct qcom_clk_branch2_sc *sc; uint32_t reg; sc = clknode_get_softc(clk); DPRINTF(clknode_get_device(sc->clknode), "%s: called\n", __func__); if (sc->enable_offset == 0) { DPRINTF(clknode_get_device(sc->clknode), "%s: no enable_offset", __func__); return (ENXIO); } DPRINTF(clknode_get_device(sc->clknode), "%s: called; enable=%d\n", __func__, enable); CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->enable_offset, ®); if (enable) { reg |= (1U << sc->enable_shift); } else { reg &= ~(1U << sc->enable_shift); } CLKDEV_WRITE_4(clknode_get_device(sc->clknode), sc->enable_offset, reg); /* * Now wait for the clock branch to update! */ if (! 
qcom_clk_branch2_wait_locked(sc, enable)) { CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); DPRINTF(clknode_get_device(sc->clknode), "%s: failed to wait!\n", __func__); return (ENXIO); } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); return (0); } static int qcom_clk_branch2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct qcom_clk_branch2_sc *sc; sc = clknode_get_softc(clk); /* We only support what our parent clock is currently set as */ *fout = fin; /* .. and stop here if we don't have SET_RATE_PARENT */ if (sc->flags & QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT) *stop = 0; else *stop = 1; return (0); } static clknode_method_t qcom_clk_branch2_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_branch2_init), CLKNODEMETHOD(clknode_set_gate, qcom_clk_branch2_set_gate), CLKNODEMETHOD(clknode_set_freq, qcom_clk_branch2_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_branch2, qcom_clk_branch2_class, qcom_clk_branch2_methods, sizeof(struct qcom_clk_branch2_sc), clknode_class); int qcom_clk_branch2_register(struct clkdom *clkdom, struct qcom_clk_branch2_def *clkdef) { struct clknode *clk; struct qcom_clk_branch2_sc *sc; if (clkdef->flags & QCOM_CLK_BRANCH2_FLAGS_CRITICAL) clkdef->clkdef.flags |= CLK_NODE_CANNOT_STOP; clk = clknode_create(clkdom, &qcom_clk_branch2_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->enable_offset = clkdef->enable_offset; sc->enable_shift = clkdef->enable_shift; sc->halt_reg = clkdef->halt_reg; sc->hwcg_reg = clkdef->hwcg_reg; sc->hwcg_bit = clkdef->hwcg_bit; sc->halt_check_type = clkdef->halt_check_type; sc->halt_check_voted = clkdef->halt_check_voted; sc->flags = clkdef->flags; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_clk/qcom_clk_fdiv.c b/sys/dev/qcom_clk/qcom_clk_fdiv.c index 6d7e7cfa8bb5..1fb1975446e6 100644 --- a/sys/dev/qcom_clk/qcom_clk_fdiv.c +++ 
b/sys/dev/qcom_clk/qcom_clk_fdiv.c @@ -1,112 +1,112 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_fdiv.h" #include "clkdev_if.h" /* * This is a fixed divisor node. It represents some divisor * that is setup by the boot environment and we don't have * any need for the driver to go and fiddle with. * * It likely should just live in the extres/clk code. 
*/ struct qcom_clk_fdiv_sc { struct clknode *clknode; uint32_t divisor; }; static int qcom_clk_fdiv_recalc(struct clknode *clk, uint64_t *freq) { struct qcom_clk_fdiv_sc *sc; sc = clknode_get_softc(clk); if (freq == NULL || *freq == 0) { printf("%s: called; NULL or 0 frequency\n", __func__); return (ENXIO); } *freq = *freq / sc->divisor; return (0); } static int qcom_clk_fdiv_init(struct clknode *clk, device_t dev) { /* * There's only a single parent here for an fixed divisor, * so just set it to 0; the caller doesn't need to supply it. */ clknode_init_parent_idx(clk, 0); return(0); } static clknode_method_t qcom_clk_fdiv_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_fdiv_init), CLKNODEMETHOD(clknode_recalc_freq, qcom_clk_fdiv_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_fepll, qcom_clk_fdiv_class, qcom_clk_fdiv_methods, sizeof(struct qcom_clk_fdiv_sc), clknode_class); int qcom_clk_fdiv_register(struct clkdom *clkdom, struct qcom_clk_fdiv_def *clkdef) { struct clknode *clk; struct qcom_clk_fdiv_sc *sc; clk = clknode_create(clkdom, &qcom_clk_fdiv_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->divisor = clkdef->divisor; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_clk/qcom_clk_fepll.c b/sys/dev/qcom_clk/qcom_clk_fepll.c index e50b4823fee5..4704186d91ed 100644 --- a/sys/dev/qcom_clk/qcom_clk_fepll.c +++ b/sys/dev/qcom_clk/qcom_clk_fepll.c @@ -1,150 +1,150 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_fepll.h" #include "clkdev_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, "cpufreq_dt: " msg); #else #define DPRINTF(dev, msg...) #endif /* * This is the top-level PLL clock on the IPQ4018/IPQ4019. * It's a fixed PLL clock that feeds a bunch of divisors into * downstrem FEPLL* and DDR clocks. * * Now, on Linux the clock code creates multiple instances of this * with an inbuilt divisor. Here instead there'll be a single * instance of the FEPLL, and then normal divisors will feed into * the multiple PLL nodes. 
*/ struct qcom_clk_fepll_sc { struct clknode *clknode; uint32_t offset; uint32_t fdbkdiv_shift; /* FDBKDIV base */ uint32_t fdbkdiv_width; /* FDBKDIV width */ uint32_t refclkdiv_shift; /* REFCLKDIV base */ uint32_t refclkdiv_width; /* REFCLKDIV width */ }; static int qcom_clk_fepll_recalc(struct clknode *clk, uint64_t *freq) { struct qcom_clk_fepll_sc *sc; uint64_t vco, parent_rate; uint32_t reg, fdbkdiv, refclkdiv; sc = clknode_get_softc(clk); if (freq == NULL || *freq == 0) { device_printf(clknode_get_device(sc->clknode), "%s: called; NULL or 0 frequency\n", __func__); return (ENXIO); } parent_rate = *freq; CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->offset, ®); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); fdbkdiv = (reg >> sc->fdbkdiv_shift) & ((1U << sc->fdbkdiv_width) - 1); refclkdiv = (reg >> sc->refclkdiv_shift) & ((1U << sc->refclkdiv_width) - 1); vco = parent_rate / refclkdiv; vco = vco * 2; vco = vco * fdbkdiv; *freq = vco; return (0); } static int qcom_clk_fepll_init(struct clknode *clk, device_t dev) { /* * There's only a single parent here for an FEPLL, so just set it * to 0; the caller doesn't need to supply it. 
*/ clknode_init_parent_idx(clk, 0); return (0); } static clknode_method_t qcom_clk_fepll_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_fepll_init), CLKNODEMETHOD(clknode_recalc_freq, qcom_clk_fepll_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_fepll, qcom_clk_fepll_class, qcom_clk_fepll_methods, sizeof(struct qcom_clk_fepll_sc), clknode_class); int qcom_clk_fepll_register(struct clkdom *clkdom, struct qcom_clk_fepll_def *clkdef) { struct clknode *clk; struct qcom_clk_fepll_sc *sc; clk = clknode_create(clkdom, &qcom_clk_fepll_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->offset = clkdef->offset; sc->fdbkdiv_shift = clkdef->fdbkdiv_shift; sc->fdbkdiv_width = clkdef->fdbkdiv_width; sc->refclkdiv_shift = clkdef->refclkdiv_shift; sc->refclkdiv_width = clkdef->refclkdiv_width; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_clk/qcom_clk_rcg2.c b/sys/dev/qcom_clk/qcom_clk_rcg2.c index 03de356aa64d..0407706dd138 100644 --- a/sys/dev/qcom_clk/qcom_clk_rcg2.c +++ b/sys/dev/qcom_clk/qcom_clk_rcg2.c @@ -1,659 +1,659 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_freqtbl.h" #include "qcom_clk_rcg2.h" #include "qcom_clk_rcg2_reg.h" #include "clkdev_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg); #else #define DPRINTF(dev, msg...) #endif #define QCOM_CLK_RCG2_CFG_OFFSET(sc) \ ((sc)->cmd_rcgr + (sc)->cfg_offset + QCOM_CLK_RCG2_CFG_REG) #define QCOM_CLK_RCG2_CMD_REGISTER(sc) \ ((sc)->cmd_rcgr + QCOM_CLK_RCG2_CMD_REG) #define QCOM_CLK_RCG2_M_OFFSET(sc) \ ((sc)->cmd_rcgr + (sc)->cfg_offset + QCOM_CLK_RCG2_M_REG) #define QCOM_CLK_RCG2_N_OFFSET(sc) \ ((sc)->cmd_rcgr + (sc)->cfg_offset + QCOM_CLK_RCG2_N_REG) #define QCOM_CLK_RCG2_D_OFFSET(sc) \ ((sc)->cmd_rcgr + (sc)->cfg_offset + QCOM_CLK_RCG2_D_REG) struct qcom_clk_rcg2_sc { struct clknode *clknode; uint32_t cmd_rcgr; uint32_t hid_width; uint32_t mnd_width; int32_t safe_src_idx; uint32_t cfg_offset; int safe_pre_parent_idx; uint32_t flags; const struct qcom_clk_freq_tbl *freq_tbl; }; /* * Finish a clock update. * * This instructs the configuration to take effect. */ static bool qcom_clk_rcg2_update_config_locked(struct qcom_clk_rcg2_sc *sc) { uint32_t reg, count; /* * Send "update" to the controller. 
*/ CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CMD_REGISTER(sc), ®); reg |= QCOM_CLK_RCG2_CMD_UPDATE; CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CMD_REGISTER(sc), reg); wmb(); /* * Poll for completion of update. */ for (count = 0; count < 1000; count++) { CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CMD_REGISTER(sc), ®); if ((reg & QCOM_CLK_RCG2_CMD_UPDATE) == 0) { return (true); } DELAY(10); rmb(); } CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CMD_REGISTER(sc), ®); DPRINTF(clknode_get_device(sc->clknode), "%s: failed; reg=0x%08x\n", __func__, reg); return (false); } /* * Calculate the output frequency given an input frequency and the m/n:d * configuration. */ static uint64_t qcom_clk_rcg2_calc_rate(uint64_t rate, uint32_t mode, uint32_t m, uint32_t n, uint32_t hid_div) { if (hid_div != 0) { rate = rate * 2; rate = rate / (hid_div + 1); } /* Note: assume n is not 0 here; bad things happen if it is */ if (mode != 0) { rate = (rate * m) / n; } return (rate); } /* * The inverse of calc_rate() - calculate the required input frequency * given the desired output freqency and m/n:d configuration. 
*/ static uint64_t qcom_clk_rcg2_calc_input_freq(uint64_t freq, uint32_t m, uint32_t n, uint32_t hid_div) { if (hid_div != 0) { freq = freq / 2; freq = freq * (hid_div + 1); } if (n != 0) { freq = (freq * n) / m; } return (freq); } static int qcom_clk_rcg2_recalc(struct clknode *clk, uint64_t *freq) { struct qcom_clk_rcg2_sc *sc; uint32_t cfg, m = 0, n = 0, hid_div = 0; uint32_t mode = 0, mask; sc = clknode_get_softc(clk); /* Read the MODE, CFG, M and N parameters */ CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), &cfg); if (sc->mnd_width != 0) { mask = (1U << sc->mnd_width) - 1; CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_M_OFFSET(sc), &m); CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_N_OFFSET(sc), &n); m = m & mask; n = ~ n; n = n & mask; n = n + m; mode = (cfg & QCOM_CLK_RCG2_CFG_MODE_MASK) >> QCOM_CLK_RCG2_CFG_MODE_SHIFT; } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); /* Fetch the divisor */ mask = (1U << sc->hid_width) - 1; hid_div = (cfg >> QCOM_CLK_RCG2_CFG_SRC_DIV_SHIFT) & mask; /* Calculate the rate based on the parent rate and config */ *freq = qcom_clk_rcg2_calc_rate(*freq, mode, m, n, hid_div); return (0); } /* * configure the mn:d divisor, pre-divisor, and parent. 
*/ static void qcom_clk_rcg2_set_config_locked(struct qcom_clk_rcg2_sc *sc, const struct qcom_clk_freq_tbl *f, int parent_idx) { uint32_t mask, reg; /* If we have MN:D, then update it */ if (sc->mnd_width != 0 && f->n != 0) { mask = (1U << sc->mnd_width) - 1; CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_M_OFFSET(sc), ®); reg &= ~mask; reg |= (f->m & mask); CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_M_OFFSET(sc), reg); CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_N_OFFSET(sc), ®); reg &= ~mask; reg |= ((~(f->n - f->m)) & mask); CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_N_OFFSET(sc), reg); CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_D_OFFSET(sc), ®); reg &= ~mask; reg |= ((~f->n) & mask); CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_D_OFFSET(sc), reg); } mask = (1U << sc->hid_width) - 1; /* * Mask out register fields we're going to modify along with * the pre-divisor. */ mask |= QCOM_CLK_RCG2_CFG_SRC_SEL_MASK | QCOM_CLK_RCG2_CFG_MODE_MASK | QCOM_CLK_RCG2_CFG_HW_CLK_CTRL_MASK; CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), ®); reg &= ~mask; /* Configure pre-divisor */ reg = reg | ((f->pre_div) << QCOM_CLK_RCG2_CFG_SRC_DIV_SHIFT); /* Configure parent clock */ reg = reg | (((parent_idx << QCOM_CLK_RCG2_CFG_SRC_SEL_SHIFT) & QCOM_CLK_RCG2_CFG_SRC_SEL_MASK)); /* Configure dual-edge if needed */ if (sc->mnd_width != 0 && f->n != 0 && (f->m != f->n)) reg |= QCOM_CLK_RCG2_CFG_MODE_DUAL_EDGE; CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), reg); } static int qcom_clk_rcg2_init(struct clknode *clk, device_t dev) { struct qcom_clk_rcg2_sc *sc; uint32_t reg; uint32_t idx; bool enabled __unused; sc = clknode_get_softc(clk); /* * Read the mux setting to set the right parent. * Whilst here, read the config to get whether we're enabled * or not. 
*/ CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); /* check if rcg2 root clock is enabled */ CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CMD_REGISTER(sc), ®); if (reg & QCOM_CLK_RCG2_CMD_ROOT_OFF) enabled = false; else enabled = true; /* mux settings */ CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), ®); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); idx = (reg & QCOM_CLK_RCG2_CFG_SRC_SEL_MASK) >> QCOM_CLK_RCG2_CFG_SRC_SEL_SHIFT; DPRINTF(clknode_get_device(sc->clknode), "%s: mux index %u, enabled=%d\n", __func__, idx, enabled); clknode_init_parent_idx(clk, idx); /* * If we could be sure our parent clocks existed here in the tree, * we could calculate our current frequency by fetching the parent * frequency and then do our divider math. Unfortunately that * currently isn't the case. */ return(0); } static int qcom_clk_rcg2_set_gate(struct clknode *clk, bool enable) { /* * For now this isn't supported; there's some support for * "shared" rcg2 nodes in the Qualcomm/upstream Linux trees but * it's not currently needed for the supported platforms. */ return (0); } /* * Program the parent index. * * This doesn't do the update. It also must be called with the device * lock held. 
*/ static void qcom_clk_rcg2_set_parent_index_locked(struct qcom_clk_rcg2_sc *sc, uint32_t index) { uint32_t reg; CLKDEV_READ_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), ®); reg = reg & ~QCOM_CLK_RCG2_CFG_SRC_SEL_MASK; reg = reg | (((index << QCOM_CLK_RCG2_CFG_SRC_SEL_SHIFT) & QCOM_CLK_RCG2_CFG_SRC_SEL_MASK)); CLKDEV_WRITE_4(clknode_get_device(sc->clknode), QCOM_CLK_RCG2_CFG_OFFSET(sc), reg); } /* * Set frequency * * fin - the parent frequency, if exists * fout - starts as the requested frequency, ends with the configured * or dry-run frequency * Flags - CLK_SET_DRYRUN, CLK_SET_ROUND_UP, CLK_SET_ROUND_DOWN * retval - 0, ERANGE */ static int qcom_clk_rcg2_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout, int flags, int *stop) { struct qcom_clk_rcg2_sc *sc; const struct qcom_clk_freq_tbl *f; const char **parent_names; uint64_t p_freq, p_clk_freq; int parent_cnt; struct clknode *p_clk; int i; sc = clknode_get_softc(clk); /* * Find a suitable frequency in the frequency table. * * TODO: should pay attention to ROUND_UP / ROUND_DOWN and add * a freqtbl method to handle both accordingly. */ f = qcom_clk_freq_tbl_lookup(sc->freq_tbl, *fout); if (f == NULL) { device_printf(clknode_get_device(sc->clknode), "%s: no suitable freqtbl entry found for freq %llu\n", __func__, *fout); return (ERANGE); } /* * Find the parent index for the given parent clock. * Abort if we can't actually find it. * * XXX TODO: this should be a clk API call! */ parent_cnt = clknode_get_parents_num(clk); parent_names = clknode_get_parent_names(clk); for (i = 0; i < parent_cnt; i++) { if (parent_names[i] == NULL) continue; if (strcmp(parent_names[i], f->parent) == 0) break; } if (i >= parent_cnt) { device_printf(clknode_get_device(sc->clknode), "%s: couldn't find suitable parent?\n", __func__); return (ENXIO); } /* * If we aren't setting the parent clock, then we need * to just program the new parent clock in and update. 
* (or for DRYRUN just skip that and return the new * frequency.) */ if ((sc->flags & QCOM_CLK_RCG2_FLAGS_SET_RATE_PARENT) == 0) { if (flags & CLK_SET_DRYRUN) { *fout = f->freq; return (0); } if (sc->safe_pre_parent_idx > -1) { DPRINTF(clknode_get_device(sc->clknode), "%s: setting to safe parent idx %d\n", __func__, sc->safe_pre_parent_idx); CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); qcom_clk_rcg2_set_parent_index_locked(sc, sc->safe_pre_parent_idx); DPRINTF(clknode_get_device(sc->clknode), "%s: safe parent: updating config\n", __func__); if (! qcom_clk_rcg2_update_config_locked(sc)) { CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); DPRINTF(clknode_get_device(sc->clknode), "%s: error updating config\n", __func__); return (ENXIO); } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); DPRINTF(clknode_get_device(sc->clknode), "%s: safe parent: done\n", __func__); clknode_set_parent_by_idx(sc->clknode, sc->safe_pre_parent_idx); } /* Program parent index, then schedule update */ CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); qcom_clk_rcg2_set_parent_index_locked(sc, i); if (! qcom_clk_rcg2_update_config_locked(sc)) { CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); device_printf(clknode_get_device(sc->clknode), "%s: couldn't program in parent idx %u!\n", __func__, i); return (ENXIO); } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); clknode_set_parent_by_idx(sc->clknode, i); *fout = f->freq; return (0); } /* * If we /are/ setting the parent clock, then we need * to determine what frequency we need the parent to * be, and then reconfigure the parent to the new * frequency, and then change our parent. * * (Again, if we're doing DRYRUN, just skip that * and return the new frequency.) 
*/ p_clk = clknode_find_by_name(f->parent); if (p_clk == NULL) { device_printf(clknode_get_device(sc->clknode), "%s: couldn't find parent clk (%s)\n", __func__, f->parent); return (ENXIO); } /* * Calculate required frequency from said parent clock to * meet the needs of our target clock. */ p_freq = qcom_clk_rcg2_calc_input_freq(f->freq, f->m, f->n, f->pre_div); DPRINTF(clknode_get_device(sc->clknode), "%s: request %llu, parent %s freq %llu, parent freq %llu\n", __func__, *fout, f->parent, f->freq, p_freq); /* * To ensure glitch-free operation on some clocks, set it to * a safe parent before programming our divisor and the parent * clock configuration. Then once it's done, flip the parent * to the new parent. * * If we're doing a dry-run then we don't need to re-parent the * clock just yet! */ if (((flags & CLK_SET_DRYRUN) == 0) && (sc->safe_pre_parent_idx > -1)) { DPRINTF(clknode_get_device(sc->clknode), "%s: setting to safe parent idx %d\n", __func__, sc->safe_pre_parent_idx); CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); qcom_clk_rcg2_set_parent_index_locked(sc, sc->safe_pre_parent_idx); DPRINTF(clknode_get_device(sc->clknode), "%s: safe parent: updating config\n", __func__); if (! qcom_clk_rcg2_update_config_locked(sc)) { CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); DPRINTF(clknode_get_device(sc->clknode), "%s: error updating config\n", __func__); return (ENXIO); } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); DPRINTF(clknode_get_device(sc->clknode), "%s: safe parent: done\n", __func__); clknode_set_parent_by_idx(sc->clknode, sc->safe_pre_parent_idx); } /* * Set the parent frequency before we change our mux and divisor * configuration. */ if (clknode_get_freq(p_clk, &p_clk_freq) != 0) { device_printf(clknode_get_device(sc->clknode), "%s: couldn't get freq for parent clock %s\n", __func__, f->parent); return (ENXIO); } if (p_clk_freq != p_freq) { uint64_t n_freq; int rv; /* * If we're doing a dryrun then call test_freq() not set_freq(). 
* That way we get the frequency back that we would be set to. * * If we're not doing a dry run then set the frequency, then * call get_freq to get what it was set to. */ if (flags & CLK_SET_DRYRUN) { n_freq = p_freq; rv = clknode_test_freq(p_clk, n_freq, flags, 0, &p_freq); } else { rv = clknode_set_freq(p_clk, p_freq, flags, 0); } if (rv != 0) { device_printf(clknode_get_device(sc->clknode), "%s: couldn't set parent clock %s frequency to " "%llu\n", __func__, f->parent, p_freq); return (ENXIO); } /* Frequency was set, fetch what it was set to */ if ((flags & CLK_SET_DRYRUN) == 0) { rv = clknode_get_freq(p_clk, &p_freq); if (rv != 0) { device_printf(clknode_get_device(sc->clknode), "%s: couldn't get parent frequency", __func__); return (ENXIO); } } } DPRINTF(clknode_get_device(sc->clknode), "%s: requested freq=%llu, target freq=%llu," " parent choice=%s, parent_freq=%llu\n", __func__, *fout, f->freq, f->parent, p_freq); /* * Set the parent node, the parent programming and the divisor * config. Because they're done together, we don't go via * a mux method on this node. */ /* * Program the divisor and parent. */ if ((flags & CLK_SET_DRYRUN) == 0) { CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); qcom_clk_rcg2_set_config_locked(sc, f, i); if (! qcom_clk_rcg2_update_config_locked(sc)) { CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); device_printf(clknode_get_device(sc->clknode), "%s: couldn't program in divisor, help!\n", __func__); return (ENXIO); } CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); clknode_set_parent_by_idx(sc->clknode, i); } /* * p_freq is now the frequency that the parent /is/ set to. * (Or would be set to for a dry run.) * * Calculate what the eventual frequency would be, we'll want * this to return when we're done - and again, if it's a dryrun, * don't set anything up. This doesn't rely on the register * contents. */ *fout = qcom_clk_rcg2_calc_rate(p_freq, (f->n == 0 ? 
0 : 1), f->m, f->n, f->pre_div); return (0); } static clknode_method_t qcom_clk_rcg2_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_rcg2_init), CLKNODEMETHOD(clknode_recalc_freq, qcom_clk_rcg2_recalc), CLKNODEMETHOD(clknode_set_gate, qcom_clk_rcg2_set_gate), CLKNODEMETHOD(clknode_set_freq, qcom_clk_rcg2_set_freq), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_fepll, qcom_clk_rcg2_class, qcom_clk_rcg2_methods, sizeof(struct qcom_clk_rcg2_sc), clknode_class); int qcom_clk_rcg2_register(struct clkdom *clkdom, struct qcom_clk_rcg2_def *clkdef) { struct clknode *clk; struct qcom_clk_rcg2_sc *sc; /* * Right now the rcg2 code isn't supporting turning off the clock * or limiting it to the lowest parent clock. But, do set the * flags appropriately. */ if (clkdef->flags & QCOM_CLK_RCG2_FLAGS_CRITICAL) clkdef->clkdef.flags |= CLK_NODE_CANNOT_STOP; clk = clknode_create(clkdom, &qcom_clk_rcg2_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->cmd_rcgr = clkdef->cmd_rcgr; sc->hid_width = clkdef->hid_width; sc->mnd_width = clkdef->mnd_width; sc->safe_src_idx = clkdef->safe_src_idx; sc->safe_pre_parent_idx = clkdef->safe_pre_parent_idx; sc->cfg_offset = clkdef->cfg_offset; sc->flags = clkdef->flags; sc->freq_tbl = clkdef->freq_tbl; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_clk/qcom_clk_ro_div.c b/sys/dev/qcom_clk/qcom_clk_ro_div.c index 72712119faf7..f6cb14192bfb 100644 --- a/sys/dev/qcom_clk/qcom_clk_ro_div.c +++ b/sys/dev/qcom_clk/qcom_clk_ro_div.c @@ -1,150 +1,150 @@ /*- * Copyright (c) 2021 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "qcom_clk_ro_div.h" #include "clkdev_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, "cpufreq_dt: " msg); #else #define DPRINTF(dev, msg...) #endif /* * This is a read-only divisor table node. * It represents some divisor that is setup by the boot environment * and we don't have any need for the driver to go and fiddle with. * * It likely should just live in the extres/clk code. 
*/ struct qcom_clk_ro_div_sc { struct clknode *clknode; uint32_t offset; uint32_t shift; uint32_t width; struct qcom_clk_ro_div_tbl *div_tbl; }; static int qcom_clk_ro_div_recalc(struct clknode *clk, uint64_t *freq) { struct qcom_clk_ro_div_sc *sc; uint32_t reg, idx, div = 1; int i; sc = clknode_get_softc(clk); if (freq == NULL || *freq == 0) { printf("%s: called; NULL or 0 frequency\n", __func__); return (ENXIO); } CLKDEV_DEVICE_LOCK(clknode_get_device(sc->clknode)); CLKDEV_READ_4(clknode_get_device(sc->clknode), sc->offset, ®); CLKDEV_DEVICE_UNLOCK(clknode_get_device(sc->clknode)); idx = (reg >> sc->shift) & ((1U << sc->width) - 1); for (i = 0; (sc->div_tbl[i].div != 0); i++) { if (idx == sc->div_tbl[i].val) { div = sc->div_tbl[i].div; break; } } DPRINTF(clknode_get_device(sc->clknode), "%s: freq=%llu, idx=%u, div=%u, out_freq=%llu\n", __func__, *freq, idx, div, *freq / div); *freq = *freq / div; return (0); } static int qcom_clk_ro_div_init(struct clknode *clk, device_t dev) { /* * There's only a single parent here for this divisor, * so just set it to 0; the caller doesn't need to supply it. 
*/ clknode_init_parent_idx(clk, 0); return (0); } static clknode_method_t qcom_clk_ro_div_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, qcom_clk_ro_div_init), CLKNODEMETHOD(clknode_recalc_freq, qcom_clk_ro_div_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(qcom_clk_fepll, qcom_clk_ro_div_class, qcom_clk_ro_div_methods, sizeof(struct qcom_clk_ro_div_sc), clknode_class); int qcom_clk_ro_div_register(struct clkdom *clkdom, struct qcom_clk_ro_div_def *clkdef) { struct clknode *clk; struct qcom_clk_ro_div_sc *sc; clk = clknode_create(clkdom, &qcom_clk_ro_div_class, &clkdef->clkdef); if (clk == NULL) return (1); sc = clknode_get_softc(clk); sc->clknode = clk; sc->offset = clkdef->offset; sc->shift = clkdef->shift; sc->width = clkdef->width; sc->div_tbl = clkdef->div_tbl; clknode_register(clkdom, clk); return (0); } diff --git a/sys/dev/qcom_dwc3/qcom_dwc3.c b/sys/dev/qcom_dwc3/qcom_dwc3.c index 73034c914eeb..d9f01e714867 100644 --- a/sys/dev/qcom_dwc3/qcom_dwc3.c +++ b/sys/dev/qcom_dwc3/qcom_dwc3.c @@ -1,174 +1,174 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Adrian Chadd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Qualcomm DWC3 glue */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include static struct ofw_compat_data compat_data[] = { { "qcom,dwc3", 1}, { NULL, 0 } }; struct qcom_dwc3_softc { struct simplebus_softc sc; device_t dev; clk_t clk_master; clk_t clk_sleep; clk_t clk_mock_utmi; int type; }; static int qcom_dwc3_probe(device_t dev) { phandle_t node; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* Binding says that we need a child node for the actual dwc3 controller */ node = ofw_bus_get_node(dev); if (OF_child(node) <= 0) return (ENXIO); device_set_desc(dev, "Qualcomm DWC3"); return (BUS_PROBE_DEFAULT); } static int qcom_dwc3_attach(device_t dev) { struct qcom_dwc3_softc *sc; device_t cdev; phandle_t node, child; int err; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Mandatory clocks */ if (clk_get_by_ofw_name(dev, 0, "master", &sc->clk_master) != 0) { device_printf(dev, "Cannot get master clock\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "sleep", &sc->clk_sleep) != 0) { device_printf(dev, "Cannot get sleep clock\n"); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "mock_utmi", &sc->clk_mock_utmi) != 0) { device_printf(dev, "Cannot get mock_utmi 
clock\n"); return (ENXIO); } /* * TODO: when we support optional reset blocks, take things * out of reset (well, put them into reset, then take out of reset.) */ /* * Now, iterate over the clocks and enable them. */ err = clk_enable(sc->clk_master); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_master)); return (ENXIO); } err = clk_enable(sc->clk_sleep); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_sleep)); return (ENXIO); } err = clk_enable(sc->clk_mock_utmi); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_mock_utmi)); return (ENXIO); } /* * Rest is glue code. */ simplebus_init(dev, node); if (simplebus_fill_ranges(node, &sc->sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static device_method_t qcom_dwc3_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qcom_dwc3_probe), DEVMETHOD(device_attach, qcom_dwc3_attach), /* XXX TODO suspend */ /* XXX TODO resume */ DEVMETHOD_END }; DEFINE_CLASS_1(qcom_dwc3, qcom_dwc3_driver, qcom_dwc3_methods, sizeof(struct qcom_dwc3_softc), simplebus_driver); DRIVER_MODULE(qcom_dwc3, simplebus, qcom_dwc3_driver, 0, 0); diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c index b49053dc96d0..6441cf3e6ae5 100644 --- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c +++ b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c @@ -1,767 +1,767 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, Adrian Chadd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for Qualcomm IPQ4018 clock and reset device */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include #include #include #include #include #include "qcom_gcc_ipq4018_var.h" /* Fixed rate clock. */ #define F_RATE(_id, cname, _freq) \ { \ .clkdef.id = _id, \ .clkdef.name = cname, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .freq = _freq, \ } /* Linked clock. 
*/ #define F_LINK(_id, _cname) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = NULL, \ .clkdef.parent_cnt = 0, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ } /* FEPLL clock */ #define F_FEPLL(_id, _cname, _parent, _reg, _fs, _fw, _rs, _rw) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = (const char *[]){_parent}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _reg, \ .fdbkdiv_shift = _fs, \ .fdbkdiv_width = _fw, \ .refclkdiv_shift = _rs, \ .refclkdiv_width = _rw, \ } /* Fixed divisor clock */ #define F_FDIV(_id, _cname, _parent, _divisor) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = (const char *[]){_parent}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .divisor = _divisor, \ } /* APSS DIV clock */ #define F_APSSDIV(_id, _cname, _parent, _doffset, _dshift, _dwidth, \ _eoffset, _eshift, _freqtbl) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = (const char *[]){_parent}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .div_offset = _doffset, \ .div_width = _dwidth, \ .div_shift = _dshift, \ .enable_offset = _eoffset, \ .enable_shift = _eshift, \ .freq_tbl = _freqtbl, \ } /* read-only div table */ #define F_RO_DIV(_id, _cname, _parent, _offset, _shift, _width, _tbl) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = (const char *[]){_parent}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .offset = _offset, \ .width = _width, \ .shift = _shift, \ .div_tbl = _tbl, \ } /* RCG2 clock */ #define F_RCG2(_id, _cname, _parents, _rcgr, _hid_width, _mnd_width, \ _safe_src_idx, _safe_pre_parent_idx, _cfg_offset, _flags, \ _freq_tbl) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = _parents, \ .clkdef.parent_cnt = nitems(_parents), \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .cmd_rcgr = _rcgr, \ .hid_width = _hid_width, 
\ .mnd_width = _mnd_width, \ .safe_src_idx = _safe_src_idx, \ .flags= _flags, \ .safe_pre_parent_idx = _safe_pre_parent_idx, \ .freq_tbl = _freq_tbl, \ } /* branch2 gate nodes */ #define F_BRANCH2(_id, _cname, _parent, _eo, _es, _hr, _hs, _haltreg, \ _type, _voted, _flags) \ { \ .clkdef.id = _id, \ .clkdef.name = _cname, \ .clkdef.parent_names = (const char *[]){_parent}, \ .clkdef.parent_cnt = 1, \ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \ .enable_offset = _eo, \ .enable_shift = _es, \ .hwcg_reg = _hr, \ .hwcg_bit = _hs, \ .halt_reg = _haltreg, \ .halt_check_type = _type, \ .halt_check_voted = _voted, \ .flags = _flags, \ } /* * Fixed "gcc_fepll_vco" PLL derived sources: * * P_FEPLL125 - 125MHz * P_FEPLL125DLY - 125MHz * P_FEPLL200 - 200MHz * "fepll500" - 500MHz * * Fixed "gcc_apps_ddrpll_vco" PLL derived sources: * * P_DDRPLL - 192MHz */ static struct qcom_clk_fdiv_def fdiv_tbl[] = { F_FDIV(GCC_FEPLL125_CLK, "fepll125", "gcc_fepll_vco", 32), F_FDIV(GCC_FEPLL125DLY_CLK, "fepll125dly", "gcc_fepll_vco", 32), F_FDIV(GCC_FEPLL200_CLK, "fepll200", "gcc_fepll_vco", 20), F_FDIV(GCC_FEPLL500_CLK, "fepll500", "gcc_fepll_vco", 8), F_FDIV(GCC_SDCC_PLLDIV_CLK, "ddrpllsdcc", "gcc_apps_ddrpll_vco", 28), }; /* * FEPLL - 48MHz (xo) input, 4GHz output * DDRPLL - 48MHz (xo) input, 5.376GHz output */ static struct qcom_clk_fepll_def fepll_tbl[] = { F_FEPLL(GCC_FEPLL_VCO, "gcc_fepll_vco", "xo", 0x2f020, 16, 8, 24, 5), F_FEPLL(GCC_APSS_DDRPLL_VCO, "gcc_apps_ddrpll_vco", "xo", 0x2e020, 16, 8, 24, 5), }; /* * Frequency table for the APSS PLL/DIV path for the CPU frequency. * * Note - the APSS DIV code only needs the frequency and pre-divisor, * not the other fields. 
*/ static struct qcom_clk_freq_tbl apss_freq_tbl[] = { { 384000000, "gcc_apps_ddrpll_vco", 0xd, 0, 0 }, { 413000000, "gcc_apps_ddrpll_vco", 0xc, 0, 0 }, { 448000000, "gcc_apps_ddrpll_vco", 0xb, 0, 0 }, { 488000000, "gcc_apps_ddrpll_vco", 0xa, 0, 0 }, { 512000000, "gcc_apps_ddrpll_vco", 0x9, 0, 0 }, { 537000000, "gcc_apps_ddrpll_vco", 0x8, 0, 0 }, { 565000000, "gcc_apps_ddrpll_vco", 0x7, 0, 0 }, { 597000000, "gcc_apps_ddrpll_vco", 0x6, 0, 0 }, { 632000000, "gcc_apps_ddrpll_vco", 0x5, 0, 0 }, { 672000000, "gcc_apps_ddrpll_vco", 0x4, 0, 0 }, { 716000000, "gcc_apps_ddrpll_vco", 0x3, 0, 0 }, { 768000000, "gcc_apps_ddrpll_vco", 0x2, 0, 0 }, { 823000000, "gcc_apps_ddrpll_vco", 0x1, 0, 0 }, { 896000000, "gcc_apps_ddrpll_vco", 0x0, 0, 0 }, { 0, } }; /* * APSS div/gate */ static struct qcom_clk_apssdiv_def apssdiv_tbl[] = { F_APSSDIV(GCC_APSS_CPU_PLLDIV_CLK, "ddrpllapss", "gcc_apps_ddrpll_vco", 0x2e020, 4, 4, 0x2e000, 0, &apss_freq_tbl[0]), }; /* * Parent clocks for the apps_clk_src clock. */ static const char * apps_clk_src_parents[] = { "xo", "ddrpllapss", "fepll500", "fepll200" }; /* * Parents lists for a variety of blocks. 
*/ static const char * gcc_xo_200_parents[] = { "xo", "fepll200" }; static const char * gcc_xo_200_500_parents[] = { "xo", "fepll200", "fepll500" }; static const char * gcc_xo_200_spi_parents[] = { "xo", NULL, "fepll200" }; static const char * gcc_xo_sdcc1_500_parents[] = { "xo", "ddrpllsdcc", "fepll500" }; static const char * gcc_xo_125_dly_parents[] = { "xo", "fepll125dly" }; static const char * gcc_xo_wcss2g_parents[] = { "xo", "fepllwcss2g" }; static const char * gcc_xo_wcss5g_parents[] = { "xo", "fepllwcss5g" }; static struct qcom_clk_freq_tbl apps_clk_src_freq_tbl[] = { { 48000000, "xo", 1, 0, 0 }, { 200000000, "fepll200", 1, 0, 0 }, { 384000000, "ddrpllapss", 1, 0, 0 }, { 413000000, "ddrpllapss", 1, 0, 0 }, { 448000000, "ddrpllapss", 1, 0, 0 }, { 488000000, "ddrpllapss", 1, 0, 0 }, { 500000000, "fepll500", 1, 0, 0 }, { 512000000, "ddrpllapss", 1, 0, 0 }, { 537000000, "ddrpllapss", 1, 0, 0 }, { 565000000, "ddrpllapss", 1, 0, 0 }, { 597000000, "ddrpllapss", 1, 0, 0 }, { 632000000, "ddrpllapss", 1, 0, 0 }, { 672000000, "ddrpllapss", 1, 0, 0 }, { 716000000, "ddrpllapss", 1, 0, 0 }, { 0,} }; static struct qcom_clk_freq_tbl audio_clk_src_freq_tbl[] = { { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 200000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0,} }; static struct qcom_clk_freq_tbl blsp1_qup1_i2c_apps_clk_src_freq_tbl[] = { { 19050000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(10.5), 1, 1 }, { 0,} }; static struct qcom_clk_freq_tbl blsp1_qup1_spi_apps_clk_src_freq_tbl[] = { { 960000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(12), 1, 4 }, { 4800000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 10 }, { 9600000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 5 }, { 15000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 3 }, { 19200000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 2, 5 }, { 24000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 2 }, { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0,} }; static struct qcom_clk_freq_tbl 
gcc_pcnoc_ahb_clk_src_freq_tbl[] = { { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 100000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(2), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl blsp1_uart1_apps_clk_src_freq_tbl[] = { { 1843200, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 144, 15625 }, { 3686400, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 288, 15625 }, { 7372800, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 576, 15625 }, { 14745600, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1152, 15625 }, { 16000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 2, 25 }, { 24000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 2 }, { 32000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 4, 25 }, { 40000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 5 }, { 46400000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 29, 125 }, { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl gp1_clk_src_freq_tbl[] = { { 1250000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 16, 0 }, { 2500000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 8, 0 }, { 5000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 4, 0 }, { 0, } }; static struct qcom_clk_freq_tbl sdcc1_apps_clk_src_freq_tbl[] = { { 144000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 3, 240 }, { 400000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 0 }, { 20000000, "fepll500", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 25 }, { 25000000, "fepll500", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 20 }, { 50000000, "fepll500", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 10 }, { 100000000, "fepll500", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 1, 5 }, { 192000000, "ddrpllsdcc", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl apps_ahb_clk_src_freq_tbl[] = { { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 100000000, "fepll200", QCOM_CLK_FREQTBL_PREDIV_RCG2(2), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl usb30_mock_utmi_clk_src_freq_tbl[] = { { 2000000, "fepll200", 
QCOM_CLK_FREQTBL_PREDIV_RCG2(10), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl fephy_125m_dly_clk_src_freq_tbl[] = { { 125000000, "fepll125dly", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl wcss2g_clk_src_freq_tbl[] = { { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 250000000, "fepllwcss2g", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0, } }; static struct qcom_clk_freq_tbl wcss5g_clk_src_freq_tbl[] = { { 48000000, "xo", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 250000000, "fepllwcss5g", QCOM_CLK_FREQTBL_PREDIV_RCG2(1), 0, 0 }, { 0, } }; /* * Divisor table for the 2g/5g wifi clock divisors. */ static struct qcom_clk_ro_div_tbl fepllwcss_clk_div_tbl[] = { { 0, 15 }, { 1, 16 }, { 2, 18 }, { 3, 20 }, { 0, 0 } }; /* * Read-only divisor table clocks. */ static struct qcom_clk_ro_div_def ro_div_tbl[] = { F_RO_DIV(GCC_FEPLL_WCSS2G_CLK, "fepllwcss2g", "gcc_fepll_vco", 0x2f020, 8, 2, &fepllwcss_clk_div_tbl[0]), F_RO_DIV(GCC_FEPLL_WCSS5G_CLK, "fepllwcss5g", "gcc_fepll_vco", 0x2f020, 12, 2, &fepllwcss_clk_div_tbl[0]), }; /* * RCG2 clocks */ static struct qcom_clk_rcg2_def rcg2_tbl[] = { F_RCG2(AUDIO_CLK_SRC, "audio_clk_src", gcc_xo_200_parents, 0x1b000, 5, 0, -1, -1, 0, 0, &audio_clk_src_freq_tbl[0]), F_RCG2(BLSP1_QUP1_I2C_APPS_CLK_SRC, "blsp1_qup1_i2c_apps_clk_src", gcc_xo_200_parents, 0x200c, 5, 0, -1, -1, 0, 0, &blsp1_qup1_i2c_apps_clk_src_freq_tbl[0]), F_RCG2(BLSP1_QUP2_I2C_APPS_CLK_SRC, "blsp1_qup2_i2c_apps_clk_src", gcc_xo_200_parents, 0x3000, 5, 0, -1, -1, 0, 0, &blsp1_qup1_i2c_apps_clk_src_freq_tbl[0]), F_RCG2(BLSP1_QUP1_SPI_APPS_CLK_SRC, "blsp1_qup1_spi_apps_clk_src", gcc_xo_200_spi_parents, 0x2024, 5, 8, -1, -1, 0, 0, &blsp1_qup1_spi_apps_clk_src_freq_tbl[0]), F_RCG2(BLSP1_QUP2_SPI_APPS_CLK_SRC, "blsp1_qup2_spi_apps_clk_src", gcc_xo_200_spi_parents, 0x3014, 5, 8, -1, -1, 0, 0, &blsp1_qup1_spi_apps_clk_src_freq_tbl[0]), F_RCG2(BLSP1_UART1_APPS_CLK_SRC, "blsp1_uart1_apps_clk_src", gcc_xo_200_spi_parents, 0x2044, 
5, 16, -1, -1, 0, 0, &blsp1_uart1_apps_clk_src_freq_tbl[0]), F_RCG2(BLSP1_UART2_APPS_CLK_SRC, "blsp1_uart2_apps_clk_src", gcc_xo_200_spi_parents, 0x3034, 5, 16, -1, -1, 0, 0, &blsp1_uart1_apps_clk_src_freq_tbl[0]), F_RCG2(GP1_CLK_SRC, "gp1_clk_src", gcc_xo_200_parents, 0x8004, 5, 8, -1, -1, 0, 0, &gp1_clk_src_freq_tbl[0]), F_RCG2(GP2_CLK_SRC, "gp2_clk_src", gcc_xo_200_parents, 0x9004, 5, 8, -1, -1, 0, 0, &gp1_clk_src_freq_tbl[0]), F_RCG2(GP3_CLK_SRC, "gp3_clk_src", gcc_xo_200_parents, 0xa004, 5, 8, -1, -1, 0, 0, &gp1_clk_src_freq_tbl[0]), F_RCG2(SDCC1_APPS_CLK_SRC, "sdcc1_apps_clk_src", gcc_xo_sdcc1_500_parents, 0x18004, 5, 0, -1, -1, 0, 0, &sdcc1_apps_clk_src_freq_tbl[0]), F_RCG2(GCC_APPS_CLK_SRC, "apps_clk_src", apps_clk_src_parents, 0x1900c, 5, 0, -1, 2, 0, QCOM_CLK_RCG2_FLAGS_SET_RATE_PARENT, &apps_clk_src_freq_tbl[0]), F_RCG2(GCC_APPS_AHB_CLK_SRC, "apps_ahb_clk_src", gcc_xo_200_500_parents, 0x19014, 5, 0, -1, -1, 0, 0, &apps_ahb_clk_src_freq_tbl[0]), F_RCG2(GCC_USB3_MOCK_UTMI_CLK_SRC, "usb30_mock_utmi_clk_src", gcc_xo_200_parents, 0x1e000, 5, 0, -1, -1, 0, 0, &usb30_mock_utmi_clk_src_freq_tbl[0]), F_RCG2(FEPHY_125M_DLY_CLK_SRC, "fephy_125m_dly_clk_src", gcc_xo_125_dly_parents, 0x12000, 5, 0, -1, -1, 0, 0, &fephy_125m_dly_clk_src_freq_tbl[0]), F_RCG2(WCSS2G_CLK_SRC, "wcss2g_clk_src", gcc_xo_wcss2g_parents, 0x1f000, 5, 0, -1, -1, 0, 0, &wcss2g_clk_src_freq_tbl[0]), F_RCG2(WCSS5G_CLK_SRC, "wcss5g_clk_src", gcc_xo_wcss5g_parents, 0x20000, 5, 0, -1, -1, 0, 0, &wcss5g_clk_src_freq_tbl[0]), F_RCG2(GCC_PCNOC_AHB_CLK_SRC, "gcc_pcnoc_ahb_clk_src", gcc_xo_200_500_parents, 0x21024, 5, 0, -1, -1, 0, 0, &gcc_pcnoc_ahb_clk_src_freq_tbl[0]), }; /* * branch2 clocks */ static struct qcom_clk_branch2_def branch2_tbl[] = { F_BRANCH2(GCC_AUDIO_AHB_CLK, "gcc_audio_ahb_clk", "pcnoc_clk_src", 0x1b010, 0, 0, 0, 0x1b010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_AUDIO_PWM_CLK, "gcc_audio_pwm_clk", "audio_clk_src", 0x1b00c, 0, 0, 0, 
0x1b00c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_QUP1_I2C_APPS_CLK, "gcc_blsp1_qup1_i2c_apps_clk", "blsp1_qup1_i2c_apps_clk_src", 0x2008, 0, 0, 0, 0x2008, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_QUP2_I2C_APPS_CLK, "gcc_blsp1_qup2_i2c_apps_clk", "blsp1_qup2_i2c_apps_clk_src", 0x3010, 0, 0, 0, 0x3010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_QUP1_SPI_APPS_CLK, "gcc_blsp1_qup1_spi_apps_clk", "blsp1_qup1_spi_apps_clk_src", 0x2004, 0, 0, 0, 0x2004, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_QUP2_SPI_APPS_CLK, "gcc_blsp1_qup2_spi_apps_clk", "blsp1_qup2_spi_apps_clk_src", 0x300c, 0, 0, 0, 0x300c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_UART1_APPS_CLK, "gcc_blsp1_uart1_apps_clk", "blsp1_uart1_apps_clk_src", 0x203c, 0, 0, 0, 0x203c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_BLSP1_UART2_APPS_CLK, "gcc_blsp1_uart2_apps_clk", "blsp1_uart2_apps_clk_src", 0x302c, 0, 0, 0, 0x302c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_GP1_CLK, "gcc_gp1_clk", "gp1_clk_src", 0x8000, 0, 0, 0, 0x8000, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_GP2_CLK, "gcc_gp2_clk", "gp2_clk_src", 0x9000, 0, 0, 0, 0x9000, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_GP3_CLK, "gcc_gp3_clk", "gp3_clk_src", 0xa000, 0, 0, 0, 0xa000, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), /* BRANCH_HALT_VOTED; note the different enable/halt */ F_BRANCH2(GCC_APPS_AHB_CLK_SRC, "gcc_apss_ahb_clk", "apps_ahb_clk_src", 0x6000, 14, 0, 0, 0x19004, QCOM_CLK_BRANCH2_BRANCH_HALT, true, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), 
F_BRANCH2(GCC_BLSP1_AHB_CLK, "gcc_blsp1_ahb_clk", "pcnoc_clk_src", 0x6000, 10, 0, 0, 0x1008, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_DCD_XO_CLK, "gcc_dcd_xo_clk", "xo", 0x2103c, 0, 0, 0, 0x2103c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_BOOT_ROM_AHB_CLK, "gcc_boot_rom_ahb_clk", "pcnoc_clk_src", 0x1300c, 0, 0, 0, 0x1300c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_CRYPTO_AHB_CLK, "gcc_crypto_ahb_clk", "pcnoc_clk_src", 0x6000, 0, 0, 0, 0x16024, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_CRYPTO_AXI_CLK, "gcc_crypto_axi_clk", "fepll125", 0x6000, 1, 0, 0, 0x16020, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_CRYPTO_CLK, "gcc_crypto_clk", "fepll125", 0x6000, 2, 0, 0, 0x1601c, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_ESS_CLK, "gcc_ess_clk", "fephy_125m_dly_clk_src", 0x12010, 0, 0, 0, 0x12010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_IMEM_AXI_CLK, "gcc_imem_axi_clk", "fepll200", 0x6000, 17, 0, 0, 0xe004, QCOM_CLK_BRANCH2_BRANCH_HALT, true, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_IMEM_CFG_AHB_CLK, "gcc_imem_cfg_ahb_clk", "pcnoc_clk_src", 0xe008, 0, 0, 0, 0xe008, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_PCIE_AHB_CLK, "gcc_pcie_ahb_clk", "pcnoc_clk_src", 0x1d00c, 0, 0, 0, 0x1d00c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_PCIE_AXI_M_CLK, "gcc_pcie_axi_m_clk", "fepll200", 0x1d004, 0, 0, 0, 0x1d004, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_PCIE_AXI_S_CLK, "gcc_pcie_axi_s_clk", "fepll200", 0x1d008, 0, 0, 0, 0x1d008, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_PRNG_AHB_CLK, "gcc_prng_ahb_clk", "pcnoc_clk_src", 0x6000, 8, 0, 0, 0x13004, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_QPIC_AHB_CLK, 
"gcc_qpic_ahb_clk", "pcnoc_clk_src", 0x1c008, 0, 0, 0, 0x1c008, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_QPIC_CLK, "gcc_qpic_clk", "pcnoc_clk_src", 0x1c004, 0, 0, 0, 0x1c004, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_SDCC1_AHB_CLK, "gcc_sdcc1_ahb_clk", "pcnoc_clk_src", 0x18010, 0, 0, 0, 0x18010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_SDCC1_APPS_CLK, "gcc_sdcc1_apps_clk", "sdcc1_apps_clk_src", 0x1800c, 0, 0, 0, 0x1800c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_TLMM_AHB_CLK, "gcc_tlmm_ahb_clk", "pcnoc_clk_src", 0x6000, 5, 0, 0, 0x5004, QCOM_CLK_BRANCH2_BRANCH_HALT, true, 0), /* BRANCH_HALT_VOTED */ F_BRANCH2(GCC_USB2_MASTER_CLK, "gcc_usb2_master_clk", "pcnoc_clk_src", 0x1e00c, 0, 0, 0, 0x1e00c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_USB2_SLEEP_CLK, "gcc_usb2_sleep_clk", "gcc_sleep_clk_src", 0x1e010, 0, 0, 0, 0x1e010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_USB2_MOCK_UTMI_CLK, "gcc_usb2_mock_utmi_clk", "usb30_mock_utmi_clk_src", 0x1e014, 0, 0, 0, 0x1e014, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_USB3_MASTER_CLK, "gcc_usb3_master_clk", "fepll125", 0x1e028, 0, 0, 0, 0x1e028, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_USB3_SLEEP_CLK, "gcc_usb3_sleep_clk", "gcc_sleep_clk_src", 0x1e02c, 0, 0, 0, 0x1e02c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_USB3_MOCK_UTMI_CLK, "gcc_usb3_mock_utmi_clk", "usb30_mock_utmi_clk_src", 0x1e030, 0, 0, 0, 0x1e030, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), /* Note - yes, these two have the same registers in linux */ F_BRANCH2(GCC_WCSS2G_CLK, "gcc_wcss2g_clk", "wcss2g_clk_src", 0x1f00c, 0, 0, 0, 0x1f00c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_WCSS2G_REF_CLK, "gcc_wcss2g_ref_clk", "xo", 0x1f00c, 0, 0, 0, 0x1f00c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), 
F_BRANCH2(GCC_WCSS2G_RTC_CLK, "gcc_wcss2g_rtc_clk", "gcc_sleep_clk_src", 0x1f010, 0, 0, 0, 0x1f010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), /* Note - yes, these two have the same registers in linux */ F_BRANCH2(GCC_WCSS5G_CLK, "gcc_wcss5g_clk", "wcss5g_clk_src", 0x1f00c, 0, 0, 0, 0x2000c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_WCSS5G_REF_CLK, "gcc_wcss5g_ref_clk", "xo", 0x1f00c, 0, 0, 0, 0x2000c, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), F_BRANCH2(GCC_WCSS5G_RTC_CLK, "gcc_wcss5g_rtc_clk", "gcc_sleep_clk_src", 0x1f010, 0, 0, 0, 0x20010, QCOM_CLK_BRANCH2_BRANCH_HALT, false, 0), F_BRANCH2(GCC_PCNOC_AHB_CLK, "pcnoc_clk_src", "gcc_pcnoc_ahb_clk_src", 0x21030, 0, 0, 0, 0x21030, QCOM_CLK_BRANCH2_BRANCH_HALT, false, QCOM_CLK_BRANCH2_FLAGS_CRITICAL | QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT), }; static void qcom_gcc_ipq4018_clock_init_fepll(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i < nitems(fepll_tbl); i++) { rv = qcom_clk_fepll_register(sc->clkdom, fepll_tbl + i); if (rv != 0) panic("qcom_clk_fepll_register failed"); } } static void qcom_gcc_ipq4018_clock_init_fdiv(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i < nitems(fdiv_tbl); i++) { rv = qcom_clk_fdiv_register(sc->clkdom, fdiv_tbl + i); if (rv != 0) panic("qcom_clk_fdiv_register failed"); } } static void qcom_gcc_ipq4018_clock_init_apssdiv(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i < nitems(apssdiv_tbl); i++) { rv = qcom_clk_apssdiv_register(sc->clkdom, apssdiv_tbl + i); if (rv != 0) panic("qcom_clk_apssdiv_register failed"); } } static void qcom_gcc_ipq4018_clock_init_rcg2(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i < nitems(rcg2_tbl); i++) { rv = qcom_clk_rcg2_register(sc->clkdom, rcg2_tbl + i); if (rv != 0) panic("qcom_clk_rcg2_register failed"); } } static void qcom_gcc_ipq4018_clock_init_branch2(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i 
< nitems(branch2_tbl); i++) { rv = qcom_clk_branch2_register(sc->clkdom, branch2_tbl + i); if (rv != 0) panic("qcom_clk_branch2_register failed"); } } static void qcom_gcc_ipq4018_clock_init_ro_div(struct qcom_gcc_ipq4018_softc *sc) { int i, rv; for (i = 0; i < nitems(ro_div_tbl); i++) { rv = qcom_clk_ro_div_register(sc->clkdom, ro_div_tbl + i); if (rv != 0) panic("qcom_clk_ro_div_register failed"); } } int qcom_gcc_ipq4018_clock_read(device_t dev, bus_addr_t addr, uint32_t *val) { struct qcom_gcc_ipq4018_softc *sc; sc = device_get_softc(dev); *val = bus_read_4(sc->reg, addr); return (0); } int qcom_gcc_ipq4018_clock_write(device_t dev, bus_addr_t addr, uint32_t val) { struct qcom_gcc_ipq4018_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->reg, addr, val); return (0); } int qcom_gcc_ipq4018_clock_modify(device_t dev, bus_addr_t addr, uint32_t clear_mask, uint32_t set_mask) { struct qcom_gcc_ipq4018_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = bus_read_4(sc->reg, addr); reg &= clear_mask; reg |= set_mask; bus_write_4(sc->reg, addr, reg); return (0); } void qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_ipq4018_softc *sc) { sc->clkdom = clkdom_create(sc->dev); /* Setup stuff */ qcom_gcc_ipq4018_clock_init_fepll(sc); qcom_gcc_ipq4018_clock_init_fdiv(sc); qcom_gcc_ipq4018_clock_init_apssdiv(sc); qcom_gcc_ipq4018_clock_init_rcg2(sc); qcom_gcc_ipq4018_clock_init_branch2(sc); qcom_gcc_ipq4018_clock_init_ro_div(sc); /* Finalise clock tree */ clkdom_finit(sc->clkdom); } void qcom_gcc_ipq4018_clock_lock(device_t dev) { struct qcom_gcc_ipq4018_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } void qcom_gcc_ipq4018_clock_unlock(device_t dev) { struct qcom_gcc_ipq4018_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } diff --git a/sys/dev/qcom_qup/qcom_spi.c b/sys/dev/qcom_qup/qcom_spi.c index 552f01c59bb2..f7163fa1dcab 100644 --- a/sys/dev/qcom_qup/qcom_spi.c +++ b/sys/dev/qcom_qup/qcom_spi.c @@ -1,905 +1,905 @@ /*- * 
SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, Adrian Chadd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "spibus_if.h" #include #include #include #include static struct ofw_compat_data compat_data[] = { { "qcom,spi-qup-v1.1.1", QCOM_SPI_HW_QPI_V1_1 }, { "qcom,spi-qup-v2.1.1", QCOM_SPI_HW_QPI_V2_1 }, { "qcom,spi-qup-v2.2.1", QCOM_SPI_HW_QPI_V2_2 }, { NULL, 0 } }; /* * Flip the CS GPIO line either active or inactive. * * Actually listen to the CS polarity. 
*/ static void qcom_spi_set_chipsel(struct qcom_spi_softc *sc, int cs, bool active) { bool pinactive; bool invert = !! (cs & SPIBUS_CS_HIGH); cs = cs & ~SPIBUS_CS_HIGH; if (sc->cs_pins[cs] == NULL) { device_printf(sc->sc_dev, "%s: cs=%u, active=%u, invert=%u, no gpio?\n", __func__, cs, active, invert); return; } QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_CHIPSELECT, "%s: cs=%u active=%u\n", __func__, cs, active); /* * Default rule here is CS is active low. */ if (active) pinactive = false; else pinactive = true; /* * Invert the CS line if required. */ if (invert) pinactive = !! pinactive; gpio_pin_set_active(sc->cs_pins[cs], pinactive); gpio_pin_is_active(sc->cs_pins[cs], &pinactive); } static void qcom_spi_intr(void *arg) { struct qcom_spi_softc *sc = arg; int ret; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: called\n", __func__); QCOM_SPI_LOCK(sc); ret = qcom_spi_hw_interrupt_handle(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to read intr status\n"); goto done; } /* * Handle spurious interrupts outside of an actual * transfer. */ if (sc->transfer.active == false) { device_printf(sc->sc_dev, "ERROR: spurious interrupt\n"); qcom_spi_hw_ack_opmode(sc); goto done; } /* Now, handle interrupts */ if (sc->intr.error) { sc->intr.error = false; device_printf(sc->sc_dev, "ERROR: intr\n"); } if (sc->intr.do_rx) { sc->intr.do_rx = false; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: PIO_READ\n", __func__); if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) ret = qcom_spi_hw_read_pio_fifo(sc); else ret = qcom_spi_hw_read_pio_block(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_read failed (%u)\n", ret); goto done; } } if (sc->intr.do_tx) { sc->intr.do_tx = false; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: PIO_WRITE\n", __func__); /* * For FIFO operations we do not do a write here, we did * it at the beginning of the transfer. * * For BLOCK operations yes, we call the routine. 
		 */
		if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO)
			ret = qcom_spi_hw_ack_write_pio_fifo(sc);
		else
			ret = qcom_spi_hw_write_pio_block(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_write failed (%u)\n", ret);
			goto done;
		}
	}

	/*
	 * Do this last. We may actually have completed the
	 * transfer in the PIO receive path above and it will
	 * set the done flag here.
	 */
	if (sc->intr.done) {
		sc->intr.done = false;
		sc->transfer.done = true;
		QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR,
		    "%s: transfer done\n", __func__);
		/* Wake the sleeper in qcom_spi_transfer_pio_block() */
		wakeup(sc);
	}
done:
	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR,
	    "%s: done\n", __func__);
	QCOM_SPI_UNLOCK(sc);
}

/* Probe: match against the compat_data table of supported QUP revisions. */
static int
qcom_spi_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Qualcomm SPI Interface");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Allocate GPIOs if provided in the SPI controller block.
 *
 * Some devices will use GPIO lines for chip select.
 * It's also quite annoying because some devices will want to use
 * the hardware provided CS gating for say, the first chipselect block,
 * and then use GPIOs for the later ones.
 *
 * So here we just assume for now that SPI index 0 uses the hardware
 * lines, and >0 use GPIO lines. Revisit this if better hardware
 * shows up.
 *
 * And finally, iterating over the cs-gpios list to allocate GPIOs
 * doesn't actually tell us what the polarity is. For that we need
 * to actually iterate over the list of child nodes and check what
 * their properties are (and look for "spi-cs-high".)
 */
static void
qcom_spi_attach_gpios(struct qcom_spi_softc *sc)
{
	phandle_t node;
	int idx, err;

	/* Allocate gpio pins for configured chip selects.
	 */
	node = ofw_bus_get_node(sc->sc_dev);
	for (idx = 0; idx < nitems(sc->cs_pins); idx++) {
		err = gpio_pin_get_by_ofw_propidx(sc->sc_dev, node,
		    "cs-gpios", idx, &sc->cs_pins[idx]);
		if (err == 0) {
			err = gpio_pin_setflags(sc->cs_pins[idx],
			    GPIO_PIN_OUTPUT);
			if (err != 0) {
				device_printf(sc->sc_dev,
				    "error configuring gpio for"
				    " cs %u (%d)\n", idx, err);
			}
			/*
			 * We can't set this HIGH right now because
			 * we don't know if it needs to be set to
			 * high for inactive or low for inactive
			 * based on the child SPI device flags.
			 */
#if 0
			gpio_pin_set_active(sc->cs_pins[idx], 1);
			gpio_pin_is_active(sc->cs_pins[idx], &tmp);
#endif
		} else {
			/* No GPIO for this CS index; fall back to NULL */
			device_printf(sc->sc_dev,
			    "cannot configure gpio for chip select %u\n", idx);
			sc->cs_pins[idx] = NULL;
		}
	}
}

/* Export the sc_debug flag as a "debug" sysctl under this device. */
static void
qcom_spi_sysctl_attach(struct qcom_spi_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, 0,
	    "control debugging printfs");
}

/*
 * Attach: map resources, hook the interrupt, bring up clocks, read
 * optional DT properties, initialise the QUP/SPI engines and attach
 * the spibus child.
 */
static int
qcom_spi_attach(device_t dev)
{
	struct qcom_spi_softc *sc = device_get_softc(dev);
	int rid, ret, i, val;

	sc->sc_dev = dev;

	/*
	 * Hardware version is stored in the ofw_compat_data table.
	 */
	sc->hw_version =
	    ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "ERROR: Could not map memory\n");
		ret = ENXIO;
		goto error;
	}

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->sc_irq_res) {
		device_printf(dev, "ERROR: Could not map interrupt\n");
		ret = ENXIO;
		goto error;
	}

	ret = bus_setup_intr(dev, sc->sc_irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, qcom_spi_intr, sc, &sc->sc_irq_h);
	if (ret != 0) {
		device_printf(dev, "ERROR: could not configure interrupt "
		    "(%d)\n",
		    ret);
		goto error;
	}

	qcom_spi_attach_gpios(sc);

	ret = clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core);
	if (ret != 0) {
		device_printf(dev, "ERROR: could not get %s clock (%d)\n",
		    "core", ret);
		goto error;
	}
	ret = clk_get_by_ofw_name(dev, 0, "iface", &sc->clk_iface);
	if (ret != 0) {
		device_printf(dev, "ERROR: could not get %s clock (%d)\n",
		    "iface", ret);
		goto error;
	}

	/* Bring up initial clocks if they're off */
	ret = clk_enable(sc->clk_core);
	if (ret != 0) {
		device_printf(dev, "ERROR: couldn't enable core clock (%u)\n",
		    ret);
		goto error;
	}
	ret = clk_enable(sc->clk_iface);
	if (ret != 0) {
		device_printf(dev, "ERROR: couldn't enable iface clock (%u)\n",
		    ret);
		goto error;
	}

	/*
	 * Read optional spi-max-frequency
	 */
	if (OF_getencprop(ofw_bus_get_node(dev), "spi-max-frequency",
	    &val, sizeof(val)) > 0)
		sc->config.max_frequency = val;
	else
		sc->config.max_frequency = SPI_MAX_RATE;

	/*
	 * Read optional cs-select
	 */
	if (OF_getencprop(ofw_bus_get_node(dev), "cs-select",
	    &val, sizeof(val)) > 0)
		sc->config.cs_select = val;
	else
		sc->config.cs_select = 0;

	/*
	 * Read optional num-cs
	 */
	if (OF_getencprop(ofw_bus_get_node(dev), "num-cs",
	    &val, sizeof(val)) > 0)
		sc->config.num_cs = val;
	else
		sc->config.num_cs = SPI_NUM_CHIPSELECTS;

	ret =
fdt_pinctrl_configure_by_name(dev, "default");
	if (ret != 0) {
		device_printf(dev,
		    "ERROR: could not configure default pinmux\n");
		goto error;
	}

	ret = qcom_spi_hw_read_controller_transfer_sizes(sc);
	if (ret != 0) {
		device_printf(dev, "ERROR: Could not read transfer config\n");
		goto error;
	}

	device_printf(dev, "BLOCK: input=%u bytes, output=%u bytes\n",
	    sc->config.input_block_size,
	    sc->config.output_block_size);
	device_printf(dev, "FIFO: input=%u bytes, output=%u bytes\n",
	    sc->config.input_fifo_size,
	    sc->config.output_fifo_size);

	/* QUP config */
	QCOM_SPI_LOCK(sc);
	ret = qcom_spi_hw_qup_init_locked(sc);
	if (ret != 0) {
		device_printf(dev, "ERROR: QUP init failed (%d)\n", ret);
		QCOM_SPI_UNLOCK(sc);
		goto error;
	}

	/* Initial SPI config */
	ret = qcom_spi_hw_spi_init_locked(sc);
	if (ret != 0) {
		device_printf(dev, "ERROR: SPI init failed (%d)\n", ret);
		QCOM_SPI_UNLOCK(sc);
		goto error;
	}
	QCOM_SPI_UNLOCK(sc);

	sc->spibus = device_add_child(dev, "spibus", -1);

	/* We're done, so shut down the interface clock for now */
	device_printf(dev, "DONE: shutting down interface clock for now\n");
	clk_disable(sc->clk_iface);

	/* Register for debug sysctl */
	qcom_spi_sysctl_attach(sc);

	return (bus_generic_attach(dev));
error:
	/* Unwind: release everything acquired above, in reverse. */
	if (sc->sc_irq_h)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->sc_mem_res);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0,
		    sc->sc_irq_res);
	if (sc->clk_core) {
		clk_disable(sc->clk_core);
		clk_release(sc->clk_core);
	}
	if (sc->clk_iface) {
		clk_disable(sc->clk_iface);
		clk_release(sc->clk_iface);
	}
	for (i = 0; i < CS_MAX; i++) {
		if (sc->cs_pins[i] != NULL)
			gpio_pin_release(sc->cs_pins[i]);
	}
	mtx_destroy(&sc->sc_mtx);
	return (ret);
}

/*
 * Do a PIO transfer.
 *
 * Note that right now the TX/RX lens need to match, I'm not doing
 * dummy reads / dummy writes as required if they're not the same
 * size.
The QUP hardware supports doing multi-phase transactions
 * where the FIFO isn't engaged for transmit or receive, but it's
 * not yet being done here.
 */
static int
qcom_spi_transfer_pio_block(struct qcom_spi_softc *sc, int mode,
    char *tx_buf, int tx_len, char *rx_buf, int rx_len)
{
	int ret = 0;

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: start\n",
	    __func__);

	if (rx_len != tx_len) {
		device_printf(sc->sc_dev,
		    "ERROR: tx/rx len doesn't match (%d/%d)\n",
		    tx_len, rx_len);
		return (ENXIO);
	}

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * Make initial choices for transfer configuration.
	 */
	ret = qcom_spi_hw_setup_transfer_selection(sc, tx_len);
	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "ERROR: failed to setup transfer selection (%d)\n",
		    ret);
		return (ret);
	}

	/* Now set suitable buffer/lengths */
	sc->transfer.tx_buf = tx_buf;
	sc->transfer.tx_len = tx_len;
	sc->transfer.rx_buf = rx_buf;
	sc->transfer.rx_len = rx_len;
	sc->transfer.done = false;
	sc->transfer.active = false;

	/*
	 * Loop until the full transfer set is done.
	 *
	 * qcom_spi_hw_setup_current_transfer() will take care of
	 * setting a maximum transfer size for the hardware and choose
	 * a suitable operating mode.
	 */
	while (sc->transfer.tx_offset < sc->transfer.tx_len) {
		/*
		 * Set transfer to false early; this covers
		 * it also finishing a sub-transfer and we're
		 * about the put the block into RESET state before
		 * starting a new transfer.
		 */
		sc->transfer.active = false;

		QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER,
		    "%s: tx=%d of %d bytes, rx=%d of %d bytes\n",
		    __func__,
		    sc->transfer.tx_offset, sc->transfer.tx_len,
		    sc->transfer.rx_offset, sc->transfer.rx_len);

		/*
		 * Set state to RESET before doing anything.
		 *
		 * Otherwise the second sub-transfer that we queue up
		 * will generate interrupts immediately when we start
		 * configuring it here and it'll start underflowing.
		 */
		ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: can't transition to RESET (%u)\n", ret);
			goto done;
		}
		/* blank interrupt state; we'll do a RESET below */
		bzero(&sc->intr, sizeof(sc->intr));
		sc->transfer.done = false;

		/*
		 * Configure what the transfer configuration for this
		 * sub-transfer will be.
		 */
		ret = qcom_spi_hw_setup_current_transfer(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: failed to setup sub transfer (%d)\n",
			    ret);
			goto done;
		}

		/*
		 * For now since we're configuring up PIO, we only setup
		 * the PIO transfer size.
		 */
		ret = qcom_spi_hw_setup_pio_transfer_cnt(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_pio_transfer_cnt failed"
			    " (%u)\n", ret);
			goto done;
		}

#if 0
		/*
		 * This is what we'd do to setup the block transfer sizes.
		 */
		ret = qcom_spi_hw_setup_block_transfer_cnt(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_block_transfer_cnt failed"
			    " (%u)\n", ret);
			goto done;
		}
#endif

		ret = qcom_spi_hw_setup_io_modes(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_io_modes failed"
			    " (%u)\n", ret);
			goto done;
		}

		ret = qcom_spi_hw_setup_spi_io_clock_polarity(sc,
		    !! (mode & SPIBUS_MODE_CPOL));
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_spi_io_clock_polarity"
			    "    failed (%u)\n", ret);
			goto done;
		}

		ret = qcom_spi_hw_setup_spi_config(sc, sc->state.frequency,
		    !! (mode & SPIBUS_MODE_CPHA));
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_spi_config failed"
			    " (%u)\n", ret);
			goto done;
		}

		ret = qcom_spi_hw_setup_qup_config(sc, !! (tx_len > 0),
		    !! (rx_len > 0));
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_qup_config failed"
			    " (%u)\n", ret);
			goto done;
		}

		ret = qcom_spi_hw_setup_operational_mask(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_setup_operational_mask failed"
			    " (%u)\n", ret);
			goto done;
		}

		/*
		 * Setup is done; reset the controller and start the PIO
		 * write.
		 */

		/*
		 * Set state to RUN; we may start getting interrupts that
		 * are valid and we need to handle.
		 */
		sc->transfer.active = true;
		ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RUN);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: can't transition to RUN (%u)\n", ret);
			goto done;
		}

		/*
		 * Set state to PAUSE
		 */
		ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_PAUSE);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: can't transition to PAUSE (%u)\n", ret);
			goto done;
		}

		/*
		 * If FIFO mode, write data now. Else, we'll get an
		 * interrupt when it's time to populate more data
		 * in BLOCK mode.
		 */
		if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO)
			ret = qcom_spi_hw_write_pio_fifo(sc);
		else
			ret = qcom_spi_hw_write_pio_block(sc);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: qcom_spi_hw_write failed (%u)\n", ret);
			goto done;
		}

		/*
		 * Set state to RUN
		 */
		ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RUN);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "ERROR: can't transition to RUN (%u)\n", ret);
			goto done;
		}

		/*
		 * Wait for an interrupt notification (which will
		 * continue to drive the state machine for this
		 * sub-transfer) or timeout.
		 */
		ret = 0;
		while (ret == 0 && sc->transfer.done == false) {
			QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER,
			    "%s: waiting\n", __func__);
			ret = msleep(sc, &sc->sc_mtx, 0, "qcom_spi", 0);
		}
	}
done:
	/*
	 * Complete; put controller into reset.
	 *
	 * Don't worry about return value here; if we errored out above then
	 * we want to communicate that value to the caller.
	 */
	(void) qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET);
	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER,
	    "%s: completed\n", __func__);

	/*
	 * Blank the transfer state so we don't use an old transfer
	 * state in a subsequent interrupt.
	 */
	(void) qcom_spi_hw_complete_transfer(sc);
	sc->transfer.active = false;

	return (ret);
}

/*
 * spibus transfer entry point: serialise on sc_busy, set up the clock,
 * assert CS, then run the cmd and (optional) data phases as PIO blocks.
 */
static int
qcom_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct qcom_spi_softc *sc = device_get_softc(dev);
	uint32_t cs_val, mode_val, clock_val;
	/*
	 * NOTE(review): ret is uint32_t but holds int error values and
	 * is returned as int - worth confirming/normalising upstream.
	 */
	uint32_t ret = 0;

	spibus_get_cs(child, &cs_val);
	spibus_get_clock(child, &clock_val);
	spibus_get_mode(child, &mode_val);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER,
	    "%s: called; child cs=0x%08x, clock=%u, mode=0x%08x, "
	    "cmd=%u/%u bytes; data=%u/%u bytes\n",
	    __func__,
	    cs_val,
	    clock_val,
	    mode_val,
	    cmd->tx_cmd_sz, cmd->rx_cmd_sz,
	    cmd->tx_data_sz, cmd->rx_data_sz);

	QCOM_SPI_LOCK(sc);

	/*
	 * wait until the controller isn't busy
	 */
	while (sc->sc_busy == true)
		mtx_sleep(sc, &sc->sc_mtx, 0, "qcom_spi_wait", 0);

	/*
	 * it's ours now!
	 */
	sc->sc_busy = true;

	sc->state.cs_high = !! (cs_val & SPIBUS_CS_HIGH);
	sc->state.frequency = clock_val;

	/*
	 * We can't set the clock frequency and enable it
	 * with the driver lock held, as the SPI lock is non-sleepable
	 * and the clock framework is sleepable.
	 *
	 * No other transaction is going on right now, so we can
	 * unlock here and do the clock related work.
*/ QCOM_SPI_UNLOCK(sc); /* * Set the clock frequency */ ret = clk_set_freq(sc->clk_iface, sc->state.frequency, 0); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to set frequency to %u\n", sc->state.frequency); goto done2; } clk_enable(sc->clk_iface); QCOM_SPI_LOCK(sc); /* * Set state to RESET */ ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to RESET (%u)\n", ret); goto done; } /* Assert hardware CS if set, else GPIO */ if (sc->cs_pins[cs_val & ~SPIBUS_CS_HIGH] == NULL) qcom_spi_hw_spi_cs_force(sc, cs_val & SPIBUS_CS_HIGH, true); else qcom_spi_set_chipsel(sc, cs_val & ~SPIBUS_CS_HIGH, true); /* * cmd buffer transfer */ ret = qcom_spi_transfer_pio_block(sc, mode_val, cmd->tx_cmd, cmd->tx_cmd_sz, cmd->rx_cmd, cmd->rx_cmd_sz); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to transfer cmd payload (%u)\n", ret); goto done; } /* * data buffer transfer */ if (cmd->tx_data_sz > 0) { ret = qcom_spi_transfer_pio_block(sc, mode_val, cmd->tx_data, cmd->tx_data_sz, cmd->rx_data, cmd->rx_data_sz); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to transfer data payload (%u)\n", ret); goto done; } } done: /* De-assert GPIO/CS */ if (sc->cs_pins[cs_val & ~SPIBUS_CS_HIGH] == NULL) qcom_spi_hw_spi_cs_force(sc, cs_val & ~SPIBUS_CS_HIGH, false); else qcom_spi_set_chipsel(sc, cs_val & ~SPIBUS_CS_HIGH, false); /* * Similarly to when we enabled the clock, we can't hold it here * across a clk API as that's a sleep lock and we're non-sleepable. * So instead we unlock/relock here, but we still hold the busy flag. */ QCOM_SPI_UNLOCK(sc); clk_disable(sc->clk_iface); QCOM_SPI_LOCK(sc); done2: /* * We're done; so mark the bus as not busy and wakeup * the next caller. 
	 */
	sc->sc_busy = false;
	wakeup_one(sc);
	QCOM_SPI_UNLOCK(sc);
	return (ret);
}

/* Detach: tear down children, interrupt, clocks, GPIOs and resources. */
static int
qcom_spi_detach(device_t dev)
{
	struct qcom_spi_softc *sc = device_get_softc(dev);
	int i;

	bus_generic_detach(sc->sc_dev);
	if (sc->spibus != NULL)
		device_delete_child(dev, sc->spibus);

	if (sc->sc_irq_h)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h);

	if (sc->clk_iface) {
		clk_disable(sc->clk_iface);
		clk_release(sc->clk_iface);
	}
	if (sc->clk_core) {
		clk_disable(sc->clk_core);
		clk_release(sc->clk_core);
	}

	for (i = 0; i < CS_MAX; i++) {
		if (sc->cs_pins[i] != NULL)
			gpio_pin_release(sc->cs_pins[i]);
	}

	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->sc_mem_res);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0,
		    sc->sc_irq_res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/* ofw_bus interface: children share this controller's OFW node. */
static phandle_t
qcom_spi_get_node(device_t bus, device_t dev)
{

	return ofw_bus_get_node(bus);
}

static device_method_t qcom_spi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		qcom_spi_probe),
	DEVMETHOD(device_attach,	qcom_spi_attach),
	DEVMETHOD(device_detach,	qcom_spi_detach),
	/* TODO: suspend */
	/* TODO: resume */

	DEVMETHOD(spibus_transfer,	qcom_spi_transfer),

	/* ofw_bus_if */
	DEVMETHOD(ofw_bus_get_node,	qcom_spi_get_node),

	DEVMETHOD_END
};

static driver_t qcom_spi_driver = {
	"qcom_spi",
	qcom_spi_methods,
	sizeof(struct qcom_spi_softc),
};

DRIVER_MODULE(qcom_spi, simplebus, qcom_spi_driver, 0, 0);
DRIVER_MODULE(ofw_spibus, qcom_spi, ofw_spibus_driver, 0, 0);
MODULE_DEPEND(qcom_spi, ofw_spibus, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);
diff --git a/sys/dev/qcom_qup/qcom_spi_hw.c b/sys/dev/qcom_qup/qcom_spi_hw.c
index ba2663c79cff..6efbedf0892a 100644
--- a/sys/dev/qcom_qup/qcom_spi_hw.c
+++ b/sys/dev/qcom_qup/qcom_spi_hw.c
@@ -1,982 +1,982 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021, Adrian Chadd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
* 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
-#include
+#include
#include #include #include #include "spibus_if.h" #include #include #include #include

/*
 * Decode the QUP_IO_M_MODES register into the controller's block and
 * FIFO sizes (bytes) and store them in sc->config.
 */
int
qcom_spi_hw_read_controller_transfer_sizes(struct qcom_spi_softc *sc)
{
	uint32_t reg, val;

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg);

	/* Input block size */
	val = (reg >> QUP_IO_M_INPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.input_block_size = 4;
	else
		sc->config.input_block_size = val * 16;

	/* Output block size */
	val = (reg >> QUP_IO_M_OUTPUT_BLOCK_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_BLOCK_SIZE_MASK;
	if (val == 0)
		sc->config.output_block_size = 4;
	else
		sc->config.output_block_size = val * 16;

	/* Input FIFO size */
	val = (reg >> QUP_IO_M_INPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_INPUT_FIFO_SIZE_MASK;
	sc->config.input_fifo_size =
	    sc->config.input_block_size * (2 << val);

	/* Output FIFO size */
	val = (reg >> QUP_IO_M_OUTPUT_FIFO_SIZE_SHIFT)
	    & QUP_IO_M_OUTPUT_FIFO_SIZE_MASK;
	sc->config.output_fifo_size =
	    sc->config.output_block_size * (2 << val);

	return (0);
}

/* Return whether QUP_STATE currently reports a valid state. */
static bool
qcom_spi_hw_qup_is_state_valid_locked(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_STATE);
	QCOM_SPI_BARRIER_READ(sc);

	return !!
 (reg & QUP_STATE_VALID);
}

/*
 * Poll until QUP_STATE becomes valid.
 *
 * NOTE(review): this spins up to 10 iterations with no delay between
 * register reads - confirm whether a DELAY() is warranted here.
 */
static int
qcom_spi_hw_qup_wait_state_valid_locked(struct qcom_spi_softc *sc)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (qcom_spi_hw_qup_is_state_valid_locked(sc))
			break;
	}
	if (i >= 10) {
		device_printf(sc->sc_dev,
		    "ERROR: timeout waiting for valid state\n");
		return (ENXIO);
	}
	return (0);
}

/* True if the current transfer mode is one of the DMA modes. */
static bool
qcom_spi_hw_is_opmode_dma_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	if (sc->state.transfer_mode == QUP_IO_M_MODE_DMOV)
		return (true);
	if (sc->state.transfer_mode == QUP_IO_M_MODE_BAM)
		return (true);
	return (false);
}

/*
 * Transition the QUP state machine to 'state', waiting for the
 * state-valid bit before and after the change.
 */
int
qcom_spi_hw_qup_set_state_locked(struct qcom_spi_softc *sc, uint32_t state)
{
	uint32_t cur_state;
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	/*
	 * According to the QUP specification, when going
	 * from PAUSE to RESET, two writes are required.
	 */
	if ((state == QUP_STATE_RESET)
	    && ((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE)) {
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
		QCOM_SPI_WRITE_4(sc, QUP_STATE, QUP_STATE_CLEAR);
		QCOM_SPI_BARRIER_WRITE(sc);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		QCOM_SPI_WRITE_4(sc, QUP_STATE, cur_state);
		QCOM_SPI_BARRIER_WRITE(sc);
	}

	/* Wait until the state becomes valid */
	ret = qcom_spi_hw_qup_wait_state_valid_locked(sc);
	if (ret != 0) {
		return (ret);
	}

	cur_state = QCOM_SPI_READ_4(sc, QUP_STATE);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_STATE_CHANGE,
	    "%s: FINISH: target state=%d, cur_state=0x%08x\n",
	    __func__, state, cur_state);

	return (0);
}

/*
 * Do initial QUP setup.
 *
 * This is initially for the SPI driver; it would be interesting to see how
 * much of this is the same with the I2C/HSUART paths.
 */
int
qcom_spi_hw_qup_init_locked(struct qcom_spi_softc *sc)
{
	int ret;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Full hardware reset */
	(void) qcom_spi_hw_do_full_reset(sc);

	ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET);
	if (ret != 0) {
		device_printf(sc->sc_dev, "ERROR: %s: couldn't reset\n",
		    __func__);
		goto error;
	}

	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, 0);
	QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, 0);
	/* Note: no QUP_OPERATIONAL_MASK in QUP v1 */
	if (! QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0);

	/* Explicitly disable input overrun in QUP v1 */
	if (QCOM_SPI_QUP_VERSION_V1(sc))
		QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS_EN,
		    QUP_ERROR_OUTPUT_OVER_RUN
		    | QUP_ERROR_INPUT_UNDER_RUN
		    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
error:
	return (ret);
}

/*
 * Do initial SPI setup.
 */
int
qcom_spi_hw_spi_init_locked(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Initial SPI error flags */
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS_EN,
	    QUP_ERROR_INPUT_UNDER_RUN
	    | QUP_ERROR_OUTPUT_UNDER_RUN);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial SPI config */
	QCOM_SPI_WRITE_4(sc, SPI_CONFIG, 0);
	QCOM_SPI_BARRIER_WRITE(sc);

	/* Initial CS/tri-state io control config */
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL,
	    SPI_IO_C_NO_TRI_STATE
	    | SPI_IO_C_CS_SELECT(sc->config.cs_select));
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Force the currently selected device CS line to be active
 * or inactive.
 *
 * This forces it to be active or inactive rather than letting
 * the SPI transfer machine do its thing.  If you want to be able
 * break up a big transaction into a handful of smaller ones,
 * without toggling /CS_n for that device, then you need it forced.
 * (If you toggle the /CS_n to the device to inactive then active,
 * NOR/NAND devices tend to stop a block transfer.)
 */
int
qcom_spi_hw_spi_cs_force(struct qcom_spi_softc *sc, int cs, bool enable)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_CHIPSELECT,
	    "%s: called, enable=%u\n",
	    __func__, enable);

	reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL);
	if (enable)
		reg |= SPI_IO_C_FORCE_CS;
	else
		reg &= ~SPI_IO_C_FORCE_CS;
	reg &= ~SPI_IO_C_CS_SELECT_MASK;
	reg |= SPI_IO_C_CS_SELECT(cs);
	QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * ACK/store current interrupt flag state.
 */
int
qcom_spi_hw_interrupt_handle(struct qcom_spi_softc *sc)
{
	uint32_t qup_error, spi_error, op_flags;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Get QUP/SPI state */
	qup_error = QCOM_SPI_READ_4(sc, QUP_ERROR_FLAGS);
	spi_error = QCOM_SPI_READ_4(sc, SPI_ERROR_FLAGS);
	op_flags = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);

	/* ACK state */
	QCOM_SPI_WRITE_4(sc, QUP_ERROR_FLAGS, qup_error);
	QCOM_SPI_WRITE_4(sc, SPI_ERROR_FLAGS, spi_error);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_INTR,
	    "%s: called; qup=0x%08x, spi=0x%08x, op=0x%08x\n",
	    __func__,
	    qup_error,
	    spi_error,
	    op_flags);

	/* handle error flags */
	if (qup_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (QUP) mask=0x%08x\n",
		    qup_error);
		sc->intr.error = true;
	}
	if (spi_error != 0) {
		device_printf(sc->sc_dev, "ERROR: (SPI) mask=0x%08x\n",
		    spi_error);
		sc->intr.error = true;
	}

	/* handle operational state */
	if (qcom_spi_hw_is_opmode_dma_locked(sc)) {
		/* ACK interrupts now */
		QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, op_flags);
		if ((op_flags & QUP_OP_IN_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG))
			sc->intr.rx_dma_done = true;
		if ((op_flags & QUP_OP_OUT_SERVICE_FLAG)
		    && (op_flags & QUP_OP_MAX_OUTPUT_DONE_FLAG))
			sc->intr.tx_dma_done = true;
	} else {
		/* FIFO/Block */
		if (op_flags & QUP_OP_IN_SERVICE_FLAG)
			sc->intr.do_rx = true;
		if (op_flags & QUP_OP_OUT_SERVICE_FLAG)
			sc->intr.do_tx = true;
	}

	/* Check if we've finished transfers */
	if (op_flags & QUP_OP_MAX_INPUT_DONE_FLAG)
		sc->intr.done = true;
	if (sc->intr.error)
		sc->intr.done = true;

	return (0);
}

/*
 * Make initial transfer selections based on the transfer sizes
 * and alignment.
 *
 * For now this'll just default to FIFO until that works, and then
 * will grow to include BLOCK / DMA as appropriate.
 */
int
qcom_spi_hw_setup_transfer_selection(struct qcom_spi_softc *sc,
    uint32_t len)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * For now only support doing a single FIFO transfer.
	 * The main PIO transfer routine loop will break it up for us.
	 */
	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;

	/*
	 * If we're sending a DWORD multiple sized block (like IO buffers)
	 * then we can totally just use the DWORD size transfers.
	 *
	 * This is really only valid for PIO/block modes; I'm not yet
	 * sure what we should do for DMA modes.
	 */
	if (len > 0 && len % 4 == 0)
		sc->state.transfer_word_size = 4;
	else
		sc->state.transfer_word_size = 1;

	return (0);
}

/*
 * Blank the transfer state after a full transfer is completed.
 */
int
qcom_spi_hw_complete_transfer(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	sc->state.transfer_mode = QUP_IO_M_MODE_FIFO;
	sc->transfer.tx_offset = 0;
	sc->transfer.rx_offset = 0;
	sc->transfer.tx_len = 0;
	sc->transfer.rx_len = 0;
	sc->transfer.tx_buf = NULL;
	sc->transfer.rx_buf = NULL;
	sc->state.transfer_word_size = 0;
	return (0);
}

/*
 * Configure up the transfer selection for the current transfer.
 *
 * This calculates how many words we can transfer in the current
 * transfer and what's left to transfer.
 */
int
qcom_spi_hw_setup_current_transfer(struct qcom_spi_softc *sc)
{
	uint32_t bytes_left;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/*
	 * XXX For now, base this on the TX side buffer size, not both.
	 * Later on we'll want to configure it based on the MAX of
	 * either and just eat up the dummy values in the PIO
	 * routines.  (For DMA it's ..
 more annoyingly complicated
	 * if the transfer sizes are not symmetrical.)
	 */
	bytes_left = sc->transfer.tx_len - sc->transfer.tx_offset;

	if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) {
		/*
		 * For FIFO transfers the num_words limit depends upon
		 * the word size, FIFO size and how many bytes are left.
		 * It definitely will be under SPI_MAX_XFER so don't
		 * worry about that here.
		 */
		sc->transfer.num_words = bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    sc->config.input_fifo_size / sizeof(uint32_t));
	} else if (sc->state.transfer_mode == QUP_IO_M_MODE_BLOCK) {
		/*
		 * For BLOCK transfers the logic will be a little different.
		 * Instead of it being based on the maximum input_fifo_size,
		 * it'll be broken down into the 'words per block" size but
		 * our maximum transfer size will ACTUALLY be capped by
		 * SPI_MAX_XFER (65536-64 bytes.)  Each transfer
		 * will end up being in multiples of a block until the
		 * last transfer.
		 */
		sc->transfer.num_words = bytes_left / sc->state.transfer_word_size;
		sc->transfer.num_words = MIN(sc->transfer.num_words,
		    SPI_MAX_XFER);
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: transfer.tx_len=%u,"
	    "transfer.tx_offset=%u,"
	    " transfer_word_size=%u,"
	    " bytes_left=%u, num_words=%u, fifo_word_max=%u\n",
	    __func__,
	    sc->transfer.tx_len,
	    sc->transfer.tx_offset,
	    sc->state.transfer_word_size,
	    bytes_left,
	    sc->transfer.num_words,
	    sc->config.input_fifo_size / sizeof(uint32_t));

	return (0);
}

/*
 * Setup the PIO FIFO transfer count.
 *
 * Note that we get a /single/ TX/RX phase up to these num_words
 * transfers.
 */
int
qcom_spi_hw_setup_pio_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, 0);

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP,
	    "%s: num_words=%u\n", __func__,
	    sc->transfer.num_words);

	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Setup the PIO BLOCK transfer count.
 *
 * This sets up the total transfer size, in TX/RX FIFO block size
 * chunks.  We will get multiple notifications when a block sized
 * chunk of data is available or required.
 */
int
qcom_spi_hw_setup_block_transfer_cnt(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_MX_READ_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_WRITE_CNT, 0);
	QCOM_SPI_WRITE_4(sc, QUP_MX_INPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_WRITE_4(sc, QUP_MX_OUTPUT_CNT, sc->transfer.num_words);
	QCOM_SPI_BARRIER_WRITE(sc);

	return (0);
}

/*
 * Program QUP_IO_M_MODES: pack/unpack enables (DMA only) and the
 * input/output transfer mode fields.
 */
int
qcom_spi_hw_setup_io_modes(struct qcom_spi_softc *sc)
{
	uint32_t reg;

	QCOM_SPI_ASSERT_LOCKED(sc);

	reg = QCOM_SPI_READ_4(sc, QUP_IO_M_MODES);

	reg &= ~((QUP_IO_M_INPUT_MODE_MASK << QUP_IO_M_INPUT_MODE_SHIFT)
	    | (QUP_IO_M_OUTPUT_MODE_MASK << QUP_IO_M_OUTPUT_MODE_SHIFT));

	/*
	 * If it's being done using DMA then the hardware will
	 * need to pack and unpack the byte stream into the word/dword
	 * stream being expected by the SPI/QUP micro engine.
	 *
	 * For PIO modes we're doing the pack/unpack in software,
	 * see the pio/block transfer routines.
*/ if (qcom_spi_hw_is_opmode_dma_locked(sc)) reg |= (QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); else reg &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); /* Transfer mode */ reg |= ((sc->state.transfer_mode & QUP_IO_M_INPUT_MODE_MASK) << QUP_IO_M_INPUT_MODE_SHIFT); reg |= ((sc->state.transfer_mode & QUP_IO_M_OUTPUT_MODE_MASK) << QUP_IO_M_OUTPUT_MODE_SHIFT); QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP, "%s: QUP_IO_M_MODES=0x%08x\n", __func__, reg); QCOM_SPI_WRITE_4(sc, QUP_IO_M_MODES, reg); QCOM_SPI_BARRIER_WRITE(sc); return (0); } int qcom_spi_hw_setup_spi_io_clock_polarity(struct qcom_spi_softc *sc, bool cpol) { uint32_t reg; QCOM_SPI_ASSERT_LOCKED(sc); reg = QCOM_SPI_READ_4(sc, SPI_IO_CONTROL); if (cpol) reg |= SPI_IO_C_CLK_IDLE_HIGH; else reg &= ~SPI_IO_C_CLK_IDLE_HIGH; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP, "%s: SPI_IO_CONTROL=0x%08x\n", __func__, reg); QCOM_SPI_WRITE_4(sc, SPI_IO_CONTROL, reg); QCOM_SPI_BARRIER_WRITE(sc); return (0); } int qcom_spi_hw_setup_spi_config(struct qcom_spi_softc *sc, uint32_t clock_val, bool cpha) { uint32_t reg; /* * For now we don't have a way to configure loopback SPI for testing, * or the clock/transfer phase. When we do then here's where we * would put that. */ QCOM_SPI_ASSERT_LOCKED(sc); reg = QCOM_SPI_READ_4(sc, SPI_CONFIG); reg &= ~SPI_CONFIG_LOOPBACK; if (cpha) reg &= ~SPI_CONFIG_INPUT_FIRST; else reg |= SPI_CONFIG_INPUT_FIRST; /* * If the frequency is above SPI_HS_MIN_RATE then enable high speed. * This apparently improves stability. * * Note - don't do this if SPI loopback is enabled! 
*/ if (clock_val >= SPI_HS_MIN_RATE) reg |= SPI_CONFIG_HS_MODE; else reg &= ~SPI_CONFIG_HS_MODE; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP, "%s: SPI_CONFIG=0x%08x\n", __func__, reg); QCOM_SPI_WRITE_4(sc, SPI_CONFIG, reg); QCOM_SPI_BARRIER_WRITE(sc); return (0); } int qcom_spi_hw_setup_qup_config(struct qcom_spi_softc *sc, bool is_tx, bool is_rx) { uint32_t reg; QCOM_SPI_ASSERT_LOCKED(sc); reg = QCOM_SPI_READ_4(sc, QUP_CONFIG); reg &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); /* SPI mode */ reg |= QUP_CONFIG_SPI_MODE; /* bitmask for number of bits per word being used in each FIFO slot */ reg |= ((sc->state.transfer_word_size * 8) - 1) & QUP_CONFIG_N; /* * When doing DMA we need to configure whether we are shifting * data in, out, and/or both. For PIO/block modes it must stay * unset. */ if (qcom_spi_hw_is_opmode_dma_locked(sc)) { if (is_rx == false) reg |= QUP_CONFIG_NO_INPUT; if (is_tx == false) reg |= QUP_CONFIG_NO_OUTPUT; } QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP, "%s: QUP_CONFIG=0x%08x\n", __func__, reg); QCOM_SPI_WRITE_4(sc, QUP_CONFIG, reg); QCOM_SPI_BARRIER_WRITE(sc); return (0); } int qcom_spi_hw_setup_operational_mask(struct qcom_spi_softc *sc) { QCOM_SPI_ASSERT_LOCKED(sc); if (QCOM_SPI_QUP_VERSION_V1(sc)) { QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TRANSFER_SETUP, "%s: skipping, qupv1\n", __func__); return (0); } if (qcom_spi_hw_is_opmode_dma_locked(sc)) QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG); else QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL_MASK, 0); QCOM_SPI_BARRIER_WRITE(sc); return (0); } /* * ACK that we already have serviced the output FIFO. 
*/ int qcom_spi_hw_ack_write_pio_fifo(struct qcom_spi_softc *sc) { QCOM_SPI_ASSERT_LOCKED(sc); QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG); QCOM_SPI_BARRIER_WRITE(sc); return (0); } int qcom_spi_hw_ack_opmode(struct qcom_spi_softc *sc) { QCOM_SPI_ASSERT_LOCKED(sc); QCOM_SPI_BARRIER_READ(sc); QCOM_SPI_READ_4(sc, QUP_OPERATIONAL); QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG); QCOM_SPI_BARRIER_WRITE(sc); return (0); } /* * Read the value from the TX buffer into the given 32 bit DWORD, * pre-shifting it into the place requested. * * Returns true if there was a byte available, false otherwise. */ static bool qcom_spi_hw_write_from_tx_buf(struct qcom_spi_softc *sc, int shift, uint32_t *val) { QCOM_SPI_ASSERT_LOCKED(sc); if (sc->transfer.tx_buf == NULL) return false; if (sc->transfer.tx_offset < sc->transfer.tx_len) { *val |= (sc->transfer.tx_buf[sc->transfer.tx_offset] & 0xff) << shift; sc->transfer.tx_offset++; return true; } return false; } int qcom_spi_hw_write_pio_fifo(struct qcom_spi_softc *sc) { uint32_t i; int num_bytes = 0; QCOM_SPI_ASSERT_LOCKED(sc); QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_OUT_SERVICE_FLAG); QCOM_SPI_BARRIER_WRITE(sc); /* * Loop over the transfer num_words, do complain if we are full. */ for (i = 0; i < sc->transfer.num_words; i++) { uint32_t reg; /* Break if FIFO is full */ if ((QCOM_SPI_READ_4(sc, QUP_OPERATIONAL) & QUP_OP_OUT_FIFO_FULL) != 0) { device_printf(sc->sc_dev, "%s: FIFO full\n", __func__); break; } /* * Handle 1, 2, 4 byte transfer packing rules. * * Unlike read, where the shifting is done towards the MSB * for us by default, we have to do it ourselves for transmit. * There's a bit that one can set to do the preshifting * (and u-boot uses it!) but I'll stick with what Linux is * doing to make it easier for future maintenance. * * The format is the same as 4 byte RX - 0xaabbccdd; * the byte ordering on the wire being aa, bb, cc, dd. 
*/ reg = 0; if (sc->state.transfer_word_size == 1) { if (qcom_spi_hw_write_from_tx_buf(sc, 24, ®)) num_bytes++; } else if (sc->state.transfer_word_size == 2) { if (qcom_spi_hw_write_from_tx_buf(sc, 24, ®)) num_bytes++; if (qcom_spi_hw_write_from_tx_buf(sc, 16, ®)) num_bytes++; } else if (sc->state.transfer_word_size == 4) { if (qcom_spi_hw_write_from_tx_buf(sc, 24, ®)) num_bytes++; if (qcom_spi_hw_write_from_tx_buf(sc, 16, ®)) num_bytes++; if (qcom_spi_hw_write_from_tx_buf(sc, 8, ®)) num_bytes++; if (qcom_spi_hw_write_from_tx_buf(sc, 0, ®)) num_bytes++; } /* * always shift out something in case we need phantom * writes to finish things up whilst we read a reply * payload. */ QCOM_SPI_WRITE_4(sc, QUP_OUTPUT_FIFO, reg); QCOM_SPI_BARRIER_WRITE(sc); } QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO, "%s: wrote %d bytes (%d fifo slots)\n", __func__, num_bytes, sc->transfer.num_words); return (0); } int qcom_spi_hw_write_pio_block(struct qcom_spi_softc *sc) { /* Not yet implemented */ return (ENXIO); } /* * Read data into the RX buffer and increment the RX offset. * * Return true if the byte was saved into the RX buffer, else * return false. */ static bool qcom_spi_hw_read_into_rx_buf(struct qcom_spi_softc *sc, uint8_t val) { QCOM_SPI_ASSERT_LOCKED(sc); if (sc->transfer.rx_buf == NULL) return false; /* Make sure we aren't overflowing the receive buffer */ if (sc->transfer.rx_offset < sc->transfer.rx_len) { sc->transfer.rx_buf[sc->transfer.rx_offset] = val; sc->transfer.rx_offset++; return true; } return false; } /* * Read "n_words" transfers, and push those bytes into the receive buffer. * Make sure we have enough space, and make sure we don't overflow the * read buffer size too! 
 */
int
qcom_spi_hw_read_pio_fifo(struct qcom_spi_softc *sc)
{
	uint32_t i;
	uint32_t reg;
	int num_bytes = 0;

	QCOM_SPI_ASSERT_LOCKED(sc);

	/* Ack the input service flag before draining the FIFO. */
	QCOM_SPI_WRITE_4(sc, QUP_OPERATIONAL, QUP_OP_IN_SERVICE_FLAG);
	QCOM_SPI_BARRIER_WRITE(sc);

	for (i = 0; i < sc->transfer.num_words; i++) {
		/* Break if FIFO is empty */
		QCOM_SPI_BARRIER_READ(sc);
		reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
		if ((reg & QUP_OP_IN_FIFO_NOT_EMPTY) == 0) {
			device_printf(sc->sc_dev, "%s: FIFO empty\n",
			    __func__);
			break;
		}

		/*
		 * Always read num_words up to FIFO being non-empty; that way
		 * if we have mis-matching TX/RX buffer sizes for some reason
		 * we will read the needed phantom bytes.
		 */
		reg = QCOM_SPI_READ_4(sc, QUP_INPUT_FIFO);

		/*
		 * Unpack the receive buffer based on whether we are
		 * doing 1, 2, or 4 byte transfer words.  Bytes arrive
		 * MSB-first within each word (mirror of the TX packing.)
		 */
		if (sc->state.transfer_word_size == 1) {
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 2) {
			if (qcom_spi_hw_read_into_rx_buf(sc,
			    (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		} else if (sc->state.transfer_word_size == 4) {
			if (qcom_spi_hw_read_into_rx_buf(sc,
			    (reg >> 24) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc,
			    (reg >> 16) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc,
			    (reg >> 8) & 0xff))
				num_bytes++;
			if (qcom_spi_hw_read_into_rx_buf(sc, reg & 0xff))
				num_bytes++;
		}
	}

	QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_HW_TX_FIFO,
	    "%s: read %d bytes (%d transfer words)\n",
	    __func__, num_bytes, sc->transfer.num_words);

#if 0
	/*
	 * This is a no-op for FIFO mode, it's only a thing for BLOCK
	 * transfers.
	 */
	QCOM_SPI_BARRIER_READ(sc);
	reg = QCOM_SPI_READ_4(sc, QUP_OPERATIONAL);
	if (reg & QUP_OP_MAX_INPUT_DONE_FLAG) {
		device_printf(sc->sc_dev, "%s: read complete (DONE)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

#if 0
	/*
	 * And see if we've finished the transfer and won't be getting
	 * any more.
	 *
	 * In FIFO only mode we don't get a completion interrupt;
	 * we get an interrupt when the FIFO has enough data present.
	 */
	if ((sc->state.transfer_mode == QUP_IO_M_MODE_FIFO)
	    && (sc->transfer.rx_offset >= sc->transfer.rx_len)) {
		device_printf(sc->sc_dev, "%s: read complete (rxlen)\n",
		    __func__);
		sc->intr.done = true;
	}
#endif

	/*
	 * For FIFO transfers we get a /single/ result that complete
	 * the FIFO transfer.  We won't get any subsequent transfers;
	 * we'll need to schedule a new FIFO transfer.
	 */
	sc->intr.done = true;

	return (0);
}

int
qcom_spi_hw_read_pio_block(struct qcom_spi_softc *sc)
{
	/* Not yet implemented */
	return (ENXIO);
}

/*
 * Issue a full QUP software reset.  The 100us settle delay below is
 * presumably a hardware requirement - TODO confirm against the SDK code.
 */
int
qcom_spi_hw_do_full_reset(struct qcom_spi_softc *sc)
{

	QCOM_SPI_ASSERT_LOCKED(sc);

	QCOM_SPI_WRITE_4(sc, QUP_SW_RESET, 1);
	QCOM_SPI_BARRIER_WRITE(sc);
	DELAY(100);

	return (0);
}
diff --git a/sys/dev/sdhci/sdhci_fdt.c b/sys/dev/sdhci/sdhci_fdt.c
index 02077e9766c6..bf9f81108467 100644
--- a/sys/dev/sdhci/sdhci_fdt.c
+++ b/sys/dev/sdhci/sdhci_fdt.c
@@ -1,724 +1,724 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Thomas Skibo
 * Copyright (c) 2008 Alexander Motin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Generic driver to attach sdhci controllers on simplebus. * Derived mainly from sdhci_pci.c */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include +#include +#include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #include "opt_mmccam.h" #include "clkdev_if.h" #include "syscon_if.h" #define MAX_SLOTS 6 #define SDHCI_FDT_ARMADA38X 1 #define SDHCI_FDT_XLNX_ZY7 2 #define SDHCI_FDT_QUALCOMM 3 #define SDHCI_FDT_RK3399 4 #define SDHCI_FDT_RK3568 5 #define SDHCI_FDT_XLNX_ZMP 6 #define RK3399_GRF_EMMCCORE_CON0 0xf000 #define RK3399_CORECFG_BASECLKFREQ 0xff00 #define RK3399_CORECFG_TIMEOUTCLKUNIT (1 << 7) #define RK3399_CORECFG_TUNINGCOUNT 0x3f #define RK3399_GRF_EMMCCORE_CON11 0xf02c #define RK3399_CORECFG_CLOCKMULTIPLIER 0xff #define RK3568_EMMC_HOST_CTRL 0x0508 #define RK3568_EMMC_EMMC_CTRL 0x052c #define RK3568_EMMC_ATCTRL 0x0540 #define RK3568_EMMC_DLL_CTRL 0x0800 #define DLL_CTRL_SRST 0x00000001 #define DLL_CTRL_START 0x00000002 #define DLL_CTRL_START_POINT_DEFAULT 0x00050000 #define DLL_CTRL_INCREMENT_DEFAULT 0x00000200 #define RK3568_EMMC_DLL_RXCLK 0x0804 #define DLL_RXCLK_DELAY_ENABLE 0x08000000 #define DLL_RXCLK_NO_INV 0x20000000 #define RK3568_EMMC_DLL_TXCLK 0x0808 #define DLL_TXCLK_DELAY_ENABLE 0x08000000 #define DLL_TXCLK_TAPNUM_DEFAULT 0x00000008 #define DLL_TXCLK_TAPNUM_FROM_SW 0x01000000 #define 
RK3568_EMMC_DLL_STRBIN 0x080c #define DLL_STRBIN_DELAY_ENABLE 0x08000000 #define DLL_STRBIN_TAPNUM_DEFAULT 0x00000008 #define DLL_STRBIN_TAPNUM_FROM_SW 0x01000000 #define RK3568_EMMC_DLL_STATUS0 0x0840 #define DLL_STATUS0_DLL_LOCK 0x00000100 #define DLL_STATUS0_DLL_TIMEOUT 0x00000200 #define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask)) #define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask)) static struct ofw_compat_data compat_data[] = { { "marvell,armada-380-sdhci", SDHCI_FDT_ARMADA38X }, { "qcom,sdhci-msm-v4", SDHCI_FDT_QUALCOMM }, { "rockchip,rk3399-sdhci-5.1", SDHCI_FDT_RK3399 }, { "xlnx,zy7_sdhci", SDHCI_FDT_XLNX_ZY7 }, { "rockchip,rk3568-dwcmshc", SDHCI_FDT_RK3568 }, { "xlnx,zynqmp-8.9a", SDHCI_FDT_XLNX_ZMP }, { NULL, 0 } }; struct sdhci_fdt_softc { device_t dev; /* Controller device */ u_int quirks; /* Chip specific quirks */ u_int caps; /* If we override SDHCI_CAPABILITIES */ uint32_t max_clk; /* Max possible freq */ uint8_t sdma_boundary; /* If we override the SDMA boundary */ struct resource *irq_res; /* IRQ resource */ void *intrhand; /* Interrupt handle */ int num_slots; /* Number of slots on this controller*/ struct sdhci_slot slots[MAX_SLOTS]; struct resource *mem_res[MAX_SLOTS]; /* Memory resource */ bool wp_inverted; /* WP pin is inverted */ bool wp_disabled; /* WP pin is not supported */ bool no_18v; /* No 1.8V support */ clk_t clk_xin; /* xin24m fixed clock */ clk_t clk_ahb; /* ahb clock */ clk_t clk_core; /* core clock */ phy_t phy; /* phy to be used */ struct syscon *syscon; /* Handle to the syscon */ }; struct sdhci_exported_clocks_sc { device_t clkdev; }; static int sdhci_exported_clocks_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static clknode_method_t sdhci_exported_clocks_clknode_methods[] = { /* Device interface */ CLKNODEMETHOD(clknode_init, sdhci_exported_clocks_init), CLKNODEMETHOD_END }; DEFINE_CLASS_1(sdhci_exported_clocks_clknode, sdhci_exported_clocks_clknode_class, 
sdhci_exported_clocks_clknode_methods, sizeof(struct sdhci_exported_clocks_sc), clknode_class); static int sdhci_clock_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells, struct clknode **clk) { int id = 1; /* Our clock id starts at 1 */ if (ncells != 0) id = cells[1]; *clk = clknode_find_by_id(clkdom, id); if (*clk == NULL) return (ENXIO); return (0); } static void sdhci_export_clocks(struct sdhci_fdt_softc *sc) { struct clknode_init_def def; struct sdhci_exported_clocks_sc *clksc; struct clkdom *clkdom; struct clknode *clk; bus_addr_t paddr; bus_size_t psize; const char **clknames; phandle_t node; int i, nclocks, ncells, error; node = ofw_bus_get_node(sc->dev); if (ofw_reg_to_paddr(node, 0, &paddr, &psize, NULL) != 0) { device_printf(sc->dev, "cannot parse 'reg' property\n"); return; } error = ofw_bus_parse_xref_list_get_length(node, "clocks", "#clock-cells", &ncells); if (error != 0 || ncells != 2) { device_printf(sc->dev, "couldn't find parent clocks\n"); return; } nclocks = ofw_bus_string_list_to_array(node, "clock-output-names", &clknames); /* No clocks to export */ if (nclocks <= 0) return; clkdom = clkdom_create(sc->dev); clkdom_set_ofw_mapper(clkdom, sdhci_clock_ofw_map); for (i = 0; i < nclocks; i++) { memset(&def, 0, sizeof(def)); def.id = i + 1; /* Exported clock IDs starts at 1 */ def.name = clknames[i]; def.parent_names = malloc(sizeof(char *) * 1, M_OFWPROP, M_WAITOK); def.parent_names[0] = clk_get_name(sc->clk_xin); def.parent_cnt = 1; clk = clknode_create(clkdom, &sdhci_exported_clocks_clknode_class, &def); if (clk == NULL) { device_printf(sc->dev, "cannot create clknode\n"); return; } clksc = clknode_get_softc(clk); clksc->clkdev = device_get_parent(sc->dev); clknode_register(clkdom, clk); } if (clkdom_finit(clkdom) != 0) { device_printf(sc->dev, "cannot finalize clkdom initialization\n"); return; } if (bootverbose) clkdom_dump(clkdom); } static int sdhci_init_clocks(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); 
int error; /* Get and activate clocks */ error = clk_get_by_ofw_name(dev, 0, "clk_xin", &sc->clk_xin); if (error != 0) { device_printf(dev, "cannot get xin clock\n"); return (ENXIO); } error = clk_enable(sc->clk_xin); if (error != 0) { device_printf(dev, "cannot enable xin clock\n"); return (ENXIO); } error = clk_get_by_ofw_name(dev, 0, "clk_ahb", &sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); return (ENXIO); } error = clk_enable(sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); return (ENXIO); } return (0); } static int sdhci_init_phy(struct sdhci_fdt_softc *sc) { int error; /* Enable PHY */ error = phy_get_by_ofw_name(sc->dev, 0, "phy_arasan", &sc->phy); if (error == ENOENT) return (0); if (error != 0) { device_printf(sc->dev, "Could not get phy\n"); return (ENXIO); } error = phy_enable(sc->phy); if (error != 0) { device_printf(sc->dev, "Could not enable phy\n"); return (ENXIO); } return (0); } static int sdhci_get_syscon(struct sdhci_fdt_softc *sc) { phandle_t node; /* Get syscon */ node = ofw_bus_get_node(sc->dev); if (OF_hasprop(node, "arasan,soc-ctl-syscon") && syscon_get_by_ofw_property(sc->dev, node, "arasan,soc-ctl-syscon", &sc->syscon) != 0) { device_printf(sc->dev, "cannot get syscon handle\n"); return (ENXIO); } return (0); } static int sdhci_init_rk3399(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); uint64_t freq; uint32_t mask, val; int error; error = clk_get_freq(sc->clk_xin, &freq); if (error != 0) { device_printf(dev, "cannot get xin clock frequency\n"); return (ENXIO); } /* Disable clock multiplier */ mask = RK3399_CORECFG_CLOCKMULTIPLIER; val = 0; SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON11, (mask << 16) | val); /* Set base clock frequency */ mask = RK3399_CORECFG_BASECLKFREQ; val = SHIFTIN((freq + (1000000 / 2)) / 1000000, RK3399_CORECFG_BASECLKFREQ); SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON0, (mask << 16) | val); return (0); } static uint8_t 
sdhci_fdt_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); return (bus_read_1(sc->mem_res[slot->num], off)); } static void sdhci_fdt_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_1(sc->mem_res[slot->num], off, val); } static uint16_t sdhci_fdt_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); return (bus_read_2(sc->mem_res[slot->num], off)); } static void sdhci_fdt_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_2(sc->mem_res[slot->num], off, val); } static uint32_t sdhci_fdt_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct sdhci_fdt_softc *sc = device_get_softc(dev); uint32_t val32; val32 = bus_read_4(sc->mem_res[slot->num], off); if (off == SDHCI_CAPABILITIES && sc->no_18v) val32 &= ~SDHCI_CAN_VDD_180; return (val32); } static void sdhci_fdt_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_4(sc->mem_res[slot->num], off, val); } static void sdhci_fdt_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_fdt_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct sdhci_fdt_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res[slot->num], off, data, count); } static void sdhci_fdt_intr(void *arg) { struct sdhci_fdt_softc *sc = (struct sdhci_fdt_softc *)arg; int i; for (i = 0; i < sc->num_slots; i++) sdhci_generic_intr(&sc->slots[i]); } static int sdhci_fdt_get_ro(device_t bus, device_t dev) { 
struct sdhci_fdt_softc *sc = device_get_softc(bus); if (sc->wp_disabled) return (false); return (sdhci_generic_get_ro(bus, dev) ^ sc->wp_inverted); } static int sdhci_fdt_set_clock(device_t dev, struct sdhci_slot *slot, int clock) { struct sdhci_fdt_softc *sc = device_get_softc(dev); int32_t val; int i; if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == SDHCI_FDT_RK3568) { if (clock == 400000) clock = 375000; if (clock) { clk_set_freq(sc->clk_core, clock, 0); if (clock <= 52000000) { bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_CTRL, 0x0); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_RXCLK, DLL_RXCLK_NO_INV); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_TXCLK, 0x0); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_STRBIN, 0x0); return (clock); } bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_CTRL, DLL_CTRL_START); DELAY(1000); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_CTRL, 0); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_CTRL, DLL_CTRL_START_POINT_DEFAULT | DLL_CTRL_INCREMENT_DEFAULT | DLL_CTRL_START); for (i = 0; i < 500; i++) { val = bus_read_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_STATUS0); if (val & DLL_STATUS0_DLL_LOCK && !(val & DLL_STATUS0_DLL_TIMEOUT)) break; DELAY(1000); } bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_ATCTRL, (0x1 << 16 | 0x2 << 17 | 0x3 << 19)); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_RXCLK, DLL_RXCLK_DELAY_ENABLE | DLL_RXCLK_NO_INV); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_TXCLK, DLL_TXCLK_DELAY_ENABLE | DLL_TXCLK_TAPNUM_DEFAULT|DLL_TXCLK_TAPNUM_FROM_SW); bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_DLL_STRBIN, DLL_STRBIN_DELAY_ENABLE | DLL_STRBIN_TAPNUM_DEFAULT | DLL_STRBIN_TAPNUM_FROM_SW); } } return (clock); } static int sdhci_fdt_probe(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); phandle_t node; pcell_t cid; sc->quirks = 0; sc->num_slots = 1; sc->max_clk = 0; if (!ofw_bus_status_okay(dev)) return (ENXIO); switch 
(ofw_bus_search_compatible(dev, compat_data)->ocd_data) { case SDHCI_FDT_ARMADA38X: sc->quirks = SDHCI_QUIRK_BROKEN_AUTO_STOP; device_set_desc(dev, "ARMADA38X SDHCI controller"); break; case SDHCI_FDT_QUALCOMM: sc->quirks = SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE | SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY; sc->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K; device_set_desc(dev, "Qualcomm FDT SDHCI controller"); break; case SDHCI_FDT_RK3399: device_set_desc(dev, "Rockchip RK3399 fdt SDHCI controller"); break; case SDHCI_FDT_XLNX_ZY7: sc->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; device_set_desc(dev, "Zynq-7000 generic fdt SDHCI controller"); break; case SDHCI_FDT_RK3568: device_set_desc(dev, "Rockchip RK3568 fdt SDHCI controller"); break; case SDHCI_FDT_XLNX_ZMP: device_set_desc(dev, "ZynqMP generic fdt SDHCI controller"); break; default: return (ENXIO); } node = ofw_bus_get_node(dev); /* Allow dts to patch quirks, slots, and max-frequency. */ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0) sc->quirks = cid; if ((OF_getencprop(node, "num-slots", &cid, sizeof(cid))) > 0) sc->num_slots = cid; if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0) sc->max_clk = cid; if (OF_hasprop(node, "no-1-8-v")) sc->no_18v = true; if (OF_hasprop(node, "wp-inverted")) sc->wp_inverted = true; if (OF_hasprop(node, "disable-wp")) sc->wp_disabled = true; return (0); } static int sdhci_fdt_attach(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); struct sdhci_slot *slot; int err, slots, rid, i, compat; sc->dev = dev; /* Allocate IRQ. 
*/ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Can't allocate IRQ\n"); return (ENOMEM); } compat = ofw_bus_search_compatible(dev, compat_data)->ocd_data; switch (compat) { case SDHCI_FDT_RK3399: case SDHCI_FDT_XLNX_ZMP: err = sdhci_init_clocks(dev); if (err != 0) { device_printf(dev, "Cannot init clocks\n"); return (err); } sdhci_export_clocks(sc); if ((err = sdhci_init_phy(sc)) != 0) { device_printf(dev, "Cannot init phy\n"); return (err); } if ((err = sdhci_get_syscon(sc)) != 0) { device_printf(dev, "Cannot get syscon handle\n"); return (err); } if (compat == SDHCI_FDT_RK3399) { err = sdhci_init_rk3399(dev); if (err != 0) { device_printf(dev, "Cannot init RK3399 SDHCI\n"); return (err); } } break; case SDHCI_FDT_RK3568: /* setup & enable clocks */ if (clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core)) { device_printf(dev, "cannot get core clock\n"); return (ENXIO); } clk_enable(sc->clk_core); break; default: break; } /* Scan all slots. */ slots = sc->num_slots; /* number of slots determined in probe(). */ sc->num_slots = 0; for (i = 0; i < slots; i++) { slot = &sc->slots[sc->num_slots]; /* Allocate memory. */ rid = 0; sc->mem_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res[i] == NULL) { device_printf(dev, "Can't allocate memory for slot %d\n", i); continue; } slot->quirks = sc->quirks; slot->caps = sc->caps; slot->max_clk = sc->max_clk; slot->sdma_boundary = sc->sdma_boundary; if (sdhci_init_slot(dev, slot, i) != 0) continue; sc->num_slots++; } device_printf(dev, "%d slot(s) allocated\n", sc->num_slots); /* Activate the interrupt */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, sdhci_fdt_intr, sc, &sc->intrhand); if (err) { device_printf(dev, "Cannot setup IRQ\n"); return (err); } /* Process cards detection. 
*/ for (i = 0; i < sc->num_slots; i++) sdhci_start_slot(&sc->slots[i]); return (0); } static int sdhci_fdt_detach(device_t dev) { struct sdhci_fdt_softc *sc = device_get_softc(dev); int i; bus_generic_detach(dev); bus_teardown_intr(dev, sc->irq_res, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); for (i = 0; i < sc->num_slots; i++) { sdhci_cleanup_slot(&sc->slots[i]); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res[i]), sc->mem_res[i]); } return (0); } static device_method_t sdhci_fdt_methods[] = { /* device_if */ DEVMETHOD(device_probe, sdhci_fdt_probe), DEVMETHOD(device_attach, sdhci_fdt_attach), DEVMETHOD(device_detach, sdhci_fdt_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, sdhci_fdt_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, sdhci_fdt_read_1), DEVMETHOD(sdhci_read_2, sdhci_fdt_read_2), DEVMETHOD(sdhci_read_4, sdhci_fdt_read_4), DEVMETHOD(sdhci_read_multi_4, sdhci_fdt_read_multi_4), DEVMETHOD(sdhci_write_1, sdhci_fdt_write_1), DEVMETHOD(sdhci_write_2, sdhci_fdt_write_2), DEVMETHOD(sdhci_write_4, sdhci_fdt_write_4), DEVMETHOD(sdhci_write_multi_4, sdhci_fdt_write_multi_4), DEVMETHOD(sdhci_set_clock, sdhci_fdt_set_clock), DEVMETHOD_END }; static driver_t sdhci_fdt_driver = { "sdhci_fdt", sdhci_fdt_methods, sizeof(struct sdhci_fdt_softc), }; DRIVER_MODULE(sdhci_fdt, simplebus, sdhci_fdt_driver, NULL, NULL); SDHCI_DEPEND(sdhci_fdt); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_fdt); #endif diff --git a/sys/dev/sdhci/sdhci_fsl_fdt.c b/sys/dev/sdhci/sdhci_fsl_fdt.c index e83ee384cad9..12ce8c13342c 100644 --- a/sys/dev/sdhci/sdhci_fsl_fdt.c +++ 
b/sys/dev/sdhci/sdhci_fsl_fdt.c @@ -1,1562 +1,1562 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 - 2021 Alstom Group. * Copyright (c) 2020 - 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* eSDHC controller driver for NXP QorIQ Layerscape SoCs. 
*/ #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #include "syscon_if.h" #define RD4 (sc->read) #define WR4 (sc->write) #define SDHCI_FSL_PRES_STATE 0x24 #define SDHCI_FSL_PRES_SDSTB (1 << 3) #define SDHCI_FSL_PRES_COMPAT_MASK 0x000f0f07 #define SDHCI_FSL_PROT_CTRL 0x28 #define SDHCI_FSL_PROT_CTRL_WIDTH_1BIT (0 << 1) #define SDHCI_FSL_PROT_CTRL_WIDTH_4BIT (1 << 1) #define SDHCI_FSL_PROT_CTRL_WIDTH_8BIT (2 << 1) #define SDHCI_FSL_PROT_CTRL_WIDTH_MASK (3 << 1) #define SDHCI_FSL_PROT_CTRL_BYTE_SWAP (0 << 4) #define SDHCI_FSL_PROT_CTRL_BYTE_NATIVE (2 << 4) #define SDHCI_FSL_PROT_CTRL_BYTE_MASK (3 << 4) #define SDHCI_FSL_PROT_CTRL_DMA_MASK (3 << 8) #define SDHCI_FSL_PROT_CTRL_VOLT_SEL (1 << 10) #define SDHCI_FSL_IRQSTAT 0x30 #define SDHCI_FSL_IRQSTAT_BRR (1 << 5) #define SDHCI_FSL_IRQSTAT_CINTSEN (1 << 8) #define SDHCI_FSL_IRQSTAT_RTE (1 << 12) #define SDHCI_FSL_IRQSTAT_TNE (1 << 26) #define SDHCI_FSL_SYS_CTRL 0x2c #define SDHCI_FSL_CLK_IPGEN (1 << 0) #define SDHCI_FSL_CLK_SDCLKEN (1 << 3) #define SDHCI_FSL_CLK_DIVIDER_MASK 0x000000f0 #define SDHCI_FSL_CLK_DIVIDER_SHIFT 4 #define SDHCI_FSL_CLK_PRESCALE_MASK 0x0000ff00 #define SDHCI_FSL_CLK_PRESCALE_SHIFT 8 #define SDHCI_FSL_WTMK_LVL 0x44 #define SDHCI_FSL_WTMK_RD_512B (0 << 0) #define SDHCI_FSL_WTMK_WR_512B (0 << 15) #define SDHCI_FSL_AUTOCERR 0x3C #define SDHCI_FSL_AUTOCERR_UHMS_HS200 (3 << 16) #define SDHCI_FSL_AUTOCERR_UHMS (7 << 16) #define SDHCI_FSL_AUTOCERR_EXTN (1 << 22) #define SDHCI_FSL_AUTOCERR_SMPCLKSEL (1 << 23) #define SDHCI_FSL_AUTOCERR_UHMS_SHIFT 16 #define SDHCI_FSL_HOST_VERSION 0xfc #define SDHCI_FSL_VENDOR_V23 0x13 #define SDHCI_FSL_CAPABILITIES2 0x114 #define SDHCI_FSL_TBCTL 0x120 #define SDHCI_FSL_TBSTAT 0x124 #define SDHCI_FSL_TBCTL_TBEN (1 << 2) #define SDHCI_FSL_TBCTL_HS400_EN (1 << 4) #define SDHCI_FSL_TBCTL_SAMP_CMD_DQS (1 << 5) 
#define SDHCI_FSL_TBCTL_HS400_WND_ADJ (1 << 6)
#define SDHCI_FSL_TBCTL_TB_MODE_MASK 0x3
#define SDHCI_FSL_TBCTL_MODE_1 0
#define SDHCI_FSL_TBCTL_MODE_2 1
#define SDHCI_FSL_TBCTL_MODE_3 2
#define SDHCI_FSL_TBCTL_MODE_SW 3
#define SDHCI_FSL_TBPTR 0x128
#define SDHCI_FSL_TBPTR_WND_START_SHIFT 8
#define SDHCI_FSL_TBPTR_WND_MASK 0x7F
#define SDHCI_FSL_SDCLKCTL 0x144
#define SDHCI_FSL_SDCLKCTL_CMD_CLK_CTL (1 << 15)
#define SDHCI_FSL_SDCLKCTL_LPBK_CLK_SEL (1 << 31)
#define SDHCI_FSL_SDTIMINGCTL 0x148
#define SDHCI_FSL_SDTIMINGCTL_FLW_CTL (1 << 15)
#define SDHCI_FSL_DLLCFG0 0x160
#define SDHCI_FSL_DLLCFG0_FREQ_SEL (1 << 27)
#define SDHCI_FSL_DLLCFG0_RESET (1 << 30)
#define SDHCI_FSL_DLLCFG0_EN (1 << 31)
#define SDHCI_FSL_DLLCFG1 0x164
#define SDHCI_FSL_DLLCFG1_PULSE_STRETCH (1 << 31)
#define SDHCI_FSL_DLLSTAT0 0x170
#define SDHCI_FSL_DLLSTAT0_SLV_STS (1 << 27)
#define SDHCI_FSL_ESDHC_CTRL 0x40c
#define SDHCI_FSL_ESDHC_CTRL_SNOOP (1 << 6)
#define SDHCI_FSL_ESDHC_CTRL_FAF (1 << 18)
#define SDHCI_FSL_ESDHC_CTRL_CLK_DIV2 (1 << 19)

/* SCFG syscon register used for signal-voltage switching (see below). */
#define SCFG_SDHCIOVSELCR 0x408
#define SCFG_SDHCIOVSELCR_TGLEN (1 << 0)
#define SCFG_SDHCIOVSELCR_VS (1 << 31)
#define SCFG_SDHCIOVSELCR_VSELVAL_MASK (3 << 1)
#define SCFG_SDHCIOVSELCR_VSELVAL_1_8 0x0
#define SCFG_SDHCIOVSELCR_VSELVAL_3_3 0x2

#define SDHCI_FSL_CAN_VDD_MASK \
	(SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330)

/* Erratum flags kept in sdhci_fsl_fdt_soc_data.errata. */
/* Some platforms do not detect pulse width correctly. */
#define SDHCI_FSL_UNRELIABLE_PULSE_DET (1 << 0)
/* On some platforms switching voltage to 1.8V is not supported */
#define SDHCI_FSL_UNSUPP_1_8V (1 << 1)
/* Hardware tuning can fail, fallback to SW tuning in that case. */
#define SDHCI_FSL_TUNING_ERRATUM_TYPE1 (1 << 2)
/*
 * Pointer window might not be set properly on some platforms.
 * Check window and perform SW tuning.
 */
#define SDHCI_FSL_TUNING_ERRATUM_TYPE2 (1 << 3)
/*
 * In HS400 mode only 4, 8, 12 clock dividers can be used.
 * Use the smallest value, bigger than requested in that case.
 */
#define SDHCI_FSL_HS400_LIMITED_CLK_DIV (1 << 4)
/*
 * Some SoCs don't have a fixed regulator. Switching voltage
 * requires special routine including syscon registers.
 */
#define SDHCI_FSL_MISSING_VCCQ_REG (1 << 5)

/*
 * HS400 tuning is done in HS200 mode, but it has to be done using
 * the target frequency. In order to apply the errata above we need to
 * know the target mode during tuning procedure. Use this flag for just that.
 */
#define SDHCI_FSL_HS400_FLAG (1 << 0)

#define SDHCI_FSL_MAX_RETRIES 20000 /* DELAY(10) * this = 200ms */

/* Per-controller software state. */
struct sdhci_fsl_fdt_softc {
	device_t dev;
	const struct sdhci_fsl_fdt_soc_data *soc_data;
	struct resource *mem_res;
	struct resource *irq_res;
	void *irq_cookie;
	uint32_t baseclk_hz;
	uint32_t maxclk_hz;
	struct sdhci_fdt_gpio *gpio;
	struct sdhci_slot slot;
	bool slot_init_done;
	uint32_t cmd_and_mode;
	uint16_t sdclk_bits;
	struct mmc_helper fdt_helper;
	uint32_t div_ratio;
	uint8_t vendor_ver;
	uint32_t flags;
	/* Endianness-specific register accessors, chosen at attach time. */
	uint32_t (* read)(struct sdhci_fsl_fdt_softc *, bus_size_t);
	void (* write)(struct sdhci_fsl_fdt_softc *, bus_size_t, uint32_t);
};

/* Static per-SoC quirk/erratum description, matched via FDT compatibles. */
struct sdhci_fsl_fdt_soc_data {
	int quirks;
	int baseclk_div;
	uint8_t errata;
	char *syscon_compat;
};

static const struct sdhci_fsl_fdt_soc_data sdhci_fsl_fdt_ls1012a_soc_data = {
	.quirks = 0,
	.baseclk_div = 1,
	.errata = SDHCI_FSL_MISSING_VCCQ_REG |
	    SDHCI_FSL_TUNING_ERRATUM_TYPE2,
	.syscon_compat = "fsl,ls1012a-scfg",
};

static const struct sdhci_fsl_fdt_soc_data sdhci_fsl_fdt_ls1028a_soc_data = {
	.quirks = SDHCI_QUIRK_DONT_SET_HISPD_BIT |
	    SDHCI_QUIRK_BROKEN_AUTO_STOP |
	    SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
	.baseclk_div = 2,
	.errata = SDHCI_FSL_UNRELIABLE_PULSE_DET |
	    SDHCI_FSL_HS400_LIMITED_CLK_DIV,
};

static const struct sdhci_fsl_fdt_soc_data sdhci_fsl_fdt_ls1046a_soc_data = {
	.quirks = SDHCI_QUIRK_DONT_SET_HISPD_BIT |
	    SDHCI_QUIRK_BROKEN_AUTO_STOP,
	.baseclk_div = 2,
	.errata = SDHCI_FSL_MISSING_VCCQ_REG |
	    SDHCI_FSL_TUNING_ERRATUM_TYPE2,
	.syscon_compat = "fsl,ls1046a-scfg",
};

static const struct sdhci_fsl_fdt_soc_data sdhci_fsl_fdt_lx2160a_soc_data = {
	.quirks = 0,
	.baseclk_div = 2,
	.errata = SDHCI_FSL_UNRELIABLE_PULSE_DET |
	    SDHCI_FSL_HS400_LIMITED_CLK_DIV,
};

static const struct sdhci_fsl_fdt_soc_data sdhci_fsl_fdt_gen_data = {
	.quirks = 0,
	.baseclk_div = 1,
};

static const struct ofw_compat_data sdhci_fsl_fdt_compat_data[] = {
	{"fsl,ls1012a-esdhc", (uintptr_t)&sdhci_fsl_fdt_ls1012a_soc_data},
	{"fsl,ls1028a-esdhc", (uintptr_t)&sdhci_fsl_fdt_ls1028a_soc_data},
	{"fsl,ls1046a-esdhc", (uintptr_t)&sdhci_fsl_fdt_ls1046a_soc_data},
	{"fsl,esdhc", (uintptr_t)&sdhci_fsl_fdt_gen_data},
	{NULL, 0}
};

/* Big-endian 32-bit register accessors. */
static uint32_t
read_be(struct sdhci_fsl_fdt_softc *sc, bus_size_t off)
{

	return (be32toh(bus_read_4(sc->mem_res, off)));
}

static void
write_be(struct sdhci_fsl_fdt_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, htobe32(val));
}

/* Little-endian 32-bit register accessors. */
static uint32_t
read_le(struct sdhci_fsl_fdt_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static void
write_le(struct sdhci_fsl_fdt_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

/*
 * Synthesize the standard SDHCI clock-control register value from the
 * cached divider bits and the eSDHC stability/enable status bits.
 */
static uint16_t
sdhci_fsl_fdt_get_clock(struct sdhci_fsl_fdt_softc *sc)
{
	uint16_t val;

	val = sc->sdclk_bits | SDHCI_CLOCK_INT_EN;
	if (RD4(sc, SDHCI_FSL_PRES_STATE) & SDHCI_FSL_PRES_SDSTB)
		val |= SDHCI_CLOCK_INT_STABLE;
	if (RD4(sc, SDHCI_FSL_SYS_CTRL) & SDHCI_FSL_CLK_SDCLKEN)
		val |= SDHCI_CLOCK_CARD_EN;

	return (val);
}

/*
 * Calculate clock prescaler and divisor values based on the following formula:
 * `frequency = base clock / (prescaler * divisor)`.
 */
#define SDHCI_FSL_FDT_CLK_DIV(sc, base, freq, pre, div)		\
do {								\
	(pre) = (sc)->vendor_ver < SDHCI_FSL_VENDOR_V23 ? 2 : 1;\
	while ((freq) < (base) / ((pre) * 16) && (pre) < 256)	\
		(pre) <<= 1;					\
	/* div/pre can't both be set to 1, according to PM. */	\
	(div) = ((pre) == 1 ? 2 : 1);				\
	while ((freq) < (base) / ((pre) * (div)) && (div) < 16)	\
		++(div);					\
} while (0)

/*
 * Program the eSDHC clock prescaler/divider for the frequency requested
 * in the slot, honouring the HS400 limited-divider erratum where needed.
 */
static void
fsl_sdhc_fdt_set_clock(struct sdhci_fsl_fdt_softc *sc, struct sdhci_slot *slot,
    uint16_t val)
{
	uint32_t prescale, div, val32, div_ratio;

	sc->sdclk_bits = val & SDHCI_DIVIDERS_MASK;
	val32 = RD4(sc, SDHCI_CLOCK_CONTROL);

	if ((val & SDHCI_CLOCK_CARD_EN) == 0) {
		/* Card clock disable requested: just gate SDCLK. */
		WR4(sc, SDHCI_CLOCK_CONTROL, val32 & ~SDHCI_FSL_CLK_SDCLKEN);
		return;
	}

	/*
	 * Ignore dividers provided by core in `sdhci_set_clock` and calculate
	 * them anew with higher accuracy.
	 */
	SDHCI_FSL_FDT_CLK_DIV(sc, sc->baseclk_hz, slot->clock, prescale, div);
	div_ratio = prescale * div;

	/*
	 * According to limited clock division erratum, clock dividers in hs400
	 * can be only 4, 8 or 12
	 */
	if ((sc->soc_data->errata & SDHCI_FSL_HS400_LIMITED_CLK_DIV) &&
	    (sc->slot.host.ios.timing == bus_timing_mmc_hs400 ||
	    (sc->flags & SDHCI_FSL_HS400_FLAG))) {
		if (div_ratio <= 4) {
			prescale = 4;
			div = 1;
		} else if (div_ratio <= 8) {
			prescale = 4;
			div = 2;
		} else if (div_ratio <= 12) {
			prescale = 4;
			div = 3;
		} else {
			device_printf(sc->dev, "Unsupported clock divider.\n");
		}
	}

	sc->div_ratio = prescale * div;
	if (bootverbose)
		device_printf(sc->dev,
		    "Desired SD/MMC freq: %d, actual: %d; base %d prescale %d divisor %d\n",
		    slot->clock, sc->baseclk_hz / (prescale * div),
		    sc->baseclk_hz, prescale, div);

	/* Register fields are written as prescale/2 and divisor-1 below. */
	prescale >>= 1;
	div -= 1;

	val32 &= ~(SDHCI_FSL_CLK_DIVIDER_MASK | SDHCI_FSL_CLK_PRESCALE_MASK);
	val32 |= div << SDHCI_FSL_CLK_DIVIDER_SHIFT;
	val32 |= prescale << SDHCI_FSL_CLK_PRESCALE_SHIFT;
	val32 |= SDHCI_FSL_CLK_IPGEN | SDHCI_FSL_CLK_SDCLKEN;
	WR4(sc, SDHCI_CLOCK_CONTROL, val32);
}

/* Emulate 8-bit SDHCI reads on top of the 32-bit-only eSDHC register file. */
static uint8_t
sdhci_fsl_fdt_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t wrk32, val32;

	sc = device_get_softc(dev);

	switch (off) {
	case SDHCI_HOST_CONTROL:
		wrk32 = RD4(sc, SDHCI_FSL_PROT_CTRL);
		val32 = wrk32 & (SDHCI_CTRL_LED | SDHCI_CTRL_CARD_DET |
		    SDHCI_CTRL_FORCE_CARD);
		if (wrk32 &
SDHCI_FSL_PROT_CTRL_WIDTH_4BIT)
			val32 |= SDHCI_CTRL_4BITBUS;
		else if (wrk32 & SDHCI_FSL_PROT_CTRL_WIDTH_8BIT)
			val32 |= SDHCI_CTRL_8BITBUS;
		return (val32);
	case SDHCI_POWER_CONTROL:
		/* Report a fixed always-on 3.0 V power state to the core. */
		return (SDHCI_POWER_ON | SDHCI_POWER_300);
	default:
		break;
	}

	/* Generic path: extract the requested byte from its 32-bit word. */
	return ((RD4(sc, off & ~3) >> (off & 3) * 8) & UINT8_MAX);
}

/* Emulate 16-bit SDHCI reads, synthesizing eSDHC-specific registers. */
static uint16_t
sdhci_fsl_fdt_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t val32;

	sc = device_get_softc(dev);

	switch (off) {
	case SDHCI_CLOCK_CONTROL:
		return (sdhci_fsl_fdt_get_clock(sc));
	case SDHCI_HOST_VERSION:
		return (RD4(sc, SDHCI_FSL_HOST_VERSION) & UINT16_MAX);
	case SDHCI_TRANSFER_MODE:
		return (sc->cmd_and_mode & UINT16_MAX);
	case SDHCI_COMMAND_FLAGS:
		return (sc->cmd_and_mode >> 16);
	case SDHCI_SLOT_INT_STATUS:
		/*
		 * eSDHC hardware manages only a single slot.
		 * Synthesize a slot interrupt status register for slot 1 below.
		 */
		val32 = RD4(sc, SDHCI_INT_STATUS);
		val32 &= RD4(sc, SDHCI_SIGNAL_ENABLE);
		return (!!val32);
	default:
		return ((RD4(sc, off & ~3) >> (off & 3) * 8) & UINT16_MAX);
	}
}

/* 32-bit reads; remaps capabilities2 and present-state bit layouts. */
static uint32_t
sdhci_fsl_fdt_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t wrk32, val32;

	sc = device_get_softc(dev);

	if (off == SDHCI_BUFFER)
		return (bus_read_4(sc->mem_res, off));
	if (off == SDHCI_CAPABILITIES2)
		off = SDHCI_FSL_CAPABILITIES2;

	val32 = RD4(sc, off);

	if (off == SDHCI_PRESENT_STATE) {
		/* Move DAT/CMD line state into their standard positions. */
		wrk32 = val32;
		val32 &= SDHCI_FSL_PRES_COMPAT_MASK;
		val32 |= (wrk32 >> 4) & SDHCI_STATE_DAT_MASK;
		val32 |= (wrk32 << 1) & SDHCI_STATE_CMD;
	}

	return (val32);
}

static void
sdhci_fsl_fdt_read_multi_4(device_t dev, struct sdhci_slot *slot,
    bus_size_t off, uint32_t *data, bus_size_t count)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(dev);
	bus_read_multi_4(sc->mem_res, off, data, count);
}

/* Emulate 8-bit writes; SDHCI_HOST_CONTROL maps onto eSDHC PROT_CTRL. */
static void
sdhci_fsl_fdt_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint8_t val)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t val32;

	sc = device_get_softc(dev);

	switch (off) {
	case SDHCI_HOST_CONTROL:
		val32 = RD4(sc, SDHCI_FSL_PROT_CTRL);
		val32 &= ~SDHCI_FSL_PROT_CTRL_WIDTH_MASK;
		val32 |= (val & SDHCI_CTRL_LED);
		if (val & SDHCI_CTRL_8BITBUS)
			val32 |= SDHCI_FSL_PROT_CTRL_WIDTH_8BIT;
		else
			/* Bus width is 1-bit when this flag is not set. */
			val32 |= (val & SDHCI_CTRL_4BITBUS);
		/* Enable SDMA by masking out this field. */
		val32 &= ~SDHCI_FSL_PROT_CTRL_DMA_MASK;
		val32 &= ~(SDHCI_CTRL_CARD_DET | SDHCI_CTRL_FORCE_CARD);
		val32 |= (val & (SDHCI_CTRL_CARD_DET |
		    SDHCI_CTRL_FORCE_CARD));
		WR4(sc, SDHCI_FSL_PROT_CTRL, val32);
		return;
	case SDHCI_POWER_CONTROL:
		/* No standard power-control register; writes are dropped. */
		return;
	default:
		/* Read-modify-write the byte inside its 32-bit word. */
		val32 = RD4(sc, off & ~3);
		val32 &= ~(UINT8_MAX << (off & 3) * 8);
		val32 |= (val << (off & 3) * 8);
		WR4(sc, off & ~3, val32);
		return;
	}
}

/* Emulate 16-bit writes; clock control and cmd/mode need special handling. */
static void
sdhci_fsl_fdt_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint16_t val)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t val32;

	sc = device_get_softc(dev);

	switch (off) {
	case SDHCI_CLOCK_CONTROL:
		fsl_sdhc_fdt_set_clock(sc, slot, val);
		return;
	/*
	 * eSDHC hardware combines command and mode into a single
	 * register. Cache it here, so that command isn't written
	 * until after mode.
	 */
	case SDHCI_TRANSFER_MODE:
		sc->cmd_and_mode = val;
		return;
	case SDHCI_COMMAND_FLAGS:
		sc->cmd_and_mode =
		    (sc->cmd_and_mode & UINT16_MAX) | (val << 16);
		WR4(sc, SDHCI_TRANSFER_MODE, sc->cmd_and_mode);
		sc->cmd_and_mode = 0;
		return;
	case SDHCI_HOST_CONTROL2:
		/*
		 * Switching to HS400 requires a special procedure,
		 * which is done in sdhci_fsl_fdt_set_uhs_timing.
		 */
		if ((val & SDHCI_CTRL2_UHS_MASK) == SDHCI_CTRL2_MMC_HS400)
			val &= ~SDHCI_CTRL2_MMC_HS400;
		/* FALLTHROUGH */
	default:
		val32 = RD4(sc, off & ~3);
		val32 &= ~(UINT16_MAX << (off & 3) * 8);
		val32 |= ((val & UINT16_MAX) << (off & 3) * 8);
		WR4(sc, off & ~3, val32);
		return;
	}
}

static void
sdhci_fsl_fdt_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t val)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(dev);

	switch (off) {
	case SDHCI_BUFFER:
		bus_write_4(sc->mem_res, off, val);
		return;
	/*
	 * eSDHC hardware lacks support for the SDMA buffer boundary
	 * feature and instead generates SDHCI_INT_DMA_END interrupts
	 * after each completed DMA data transfer.
	 * Since this duplicates the SDHCI_INT_DATA_END functionality,
	 * mask out the unneeded SDHCI_INT_DMA_END interrupt.
	 */
	case SDHCI_INT_ENABLE:
	case SDHCI_SIGNAL_ENABLE:
		val &= ~SDHCI_INT_DMA_END;
		/* FALLTHROUGH. */
	default:
		WR4(sc, off, val);
		return;
	}
}

static void
sdhci_fsl_fdt_write_multi_4(device_t dev, struct sdhci_slot *slot,
    bus_size_t off, uint32_t *data, bus_size_t count)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(dev);
	bus_write_multi_4(sc->mem_res, off, data, count);
}

/* Interrupt handler: hand off to the generic SDHCI core. */
static void
sdhci_fsl_fdt_irq(void *arg)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = arg;
	sdhci_generic_intr(&sc->slot);
	return;
}

/*
 * mmcbr update_ios: apply bus state via the generic core, then toggle
 * the vmmc/vqmmc supply regulators on power up/down.
 */
static int
sdhci_fsl_fdt_update_ios(device_t brdev, device_t reqdev)
{
	int err;
	struct sdhci_fsl_fdt_softc *sc;
	struct mmc_ios *ios;
	struct sdhci_slot *slot;

	err = sdhci_generic_update_ios(brdev, reqdev);
	if (err != 0)
		return (err);

	sc = device_get_softc(brdev);
	slot = device_get_ivars(reqdev);
	ios = &slot->host.ios;

	switch (ios->power_mode) {
	case power_on:
		break;
	case power_off:
		if (bootverbose)
			device_printf(sc->dev, "Powering down sd/mmc\n");
		if (sc->fdt_helper.vmmc_supply)
			regulator_disable(sc->fdt_helper.vmmc_supply);
		if (sc->fdt_helper.vqmmc_supply)
			regulator_disable(sc->fdt_helper.vqmmc_supply);
		break;
	case power_up:
		if (bootverbose)
			device_printf(sc->dev, "Powering up sd/mmc\n");
		if (sc->fdt_helper.vmmc_supply)
			regulator_enable(sc->fdt_helper.vmmc_supply);
		if (sc->fdt_helper.vqmmc_supply)
			regulator_enable(sc->fdt_helper.vqmmc_supply);
		break;
	};

	return (0);
}

/*
 * Switch the signal voltage through the SoC SCFG syscon, for SoCs that
 * have no dedicated vccq regulator (SDHCI_FSL_MISSING_VCCQ_REG erratum).
 */
static int
sdhci_fsl_fdt_switch_syscon_voltage(device_t dev,
    struct sdhci_fsl_fdt_softc *sc, enum mmc_vccq vccq)
{
	struct syscon *syscon;
	phandle_t syscon_node;
	uint32_t reg;

	if (sc->soc_data->syscon_compat == NULL) {
		device_printf(dev, "Empty syscon compat string.\n");
		return (ENXIO);
	}

	syscon_node = ofw_bus_find_compatible(OF_finddevice("/"),
	    sc->soc_data->syscon_compat);
	if (syscon_get_by_ofw_node(dev, syscon_node, &syscon) != 0) {
		device_printf(dev, "Could not find syscon node.\n");
		return (ENXIO);
	}

	reg = SYSCON_READ_4(syscon, SCFG_SDHCIOVSELCR);
	reg &= ~SCFG_SDHCIOVSELCR_VSELVAL_MASK;
	reg |= SCFG_SDHCIOVSELCR_TGLEN;

	switch (vccq) {
	case vccq_180:
		reg |= SCFG_SDHCIOVSELCR_VSELVAL_1_8;
		SYSCON_WRITE_4(syscon, SCFG_SDHCIOVSELCR, reg);
		DELAY(5000);
		reg = SYSCON_READ_4(syscon, SCFG_SDHCIOVSELCR);
		reg |= SCFG_SDHCIOVSELCR_VS;
		break;
	case vccq_330:
		reg |= SCFG_SDHCIOVSELCR_VSELVAL_3_3;
		SYSCON_WRITE_4(syscon, SCFG_SDHCIOVSELCR, reg);
		DELAY(5000);
		reg = SYSCON_READ_4(syscon, SCFG_SDHCIOVSELCR);
		reg &= ~SCFG_SDHCIOVSELCR_VS;
		break;
	default:
		device_printf(dev, "Unsupported voltage requested.\n");
		return (ENXIO);
	}

	SYSCON_WRITE_4(syscon, SCFG_SDHCIOVSELCR, reg);

	return (0);
}

/* mmcbr switch_vccq: select signal voltage in PROT_CTRL and regulators. */
static int
sdhci_fsl_fdt_switch_vccq(device_t brdev, device_t reqdev)
{
	struct sdhci_fsl_fdt_softc *sc;
	struct sdhci_slot *slot;
	regulator_t vqmmc_supply;
	uint32_t val_old, val;
	int uvolt, err = 0;

	sc = device_get_softc(brdev);
	slot = device_get_ivars(reqdev);

	val_old = val = RD4(sc, SDHCI_FSL_PROT_CTRL);

	switch (slot->host.ios.vccq) {
	case vccq_180:
		if (sc->soc_data->errata & SDHCI_FSL_UNSUPP_1_8V)
			return (EOPNOTSUPP);
		val |= SDHCI_FSL_PROT_CTRL_VOLT_SEL;
		uvolt = 1800000;
		break;
	case vccq_330:
		val &= ~SDHCI_FSL_PROT_CTRL_VOLT_SEL;
		uvolt = 3300000;
		break;
	default:
		return (EOPNOTSUPP);
	}

	WR4(sc, SDHCI_FSL_PROT_CTRL, val);

	if
(sc->soc_data->errata & SDHCI_FSL_MISSING_VCCQ_REG) {
		err = sdhci_fsl_fdt_switch_syscon_voltage(brdev, sc,
		    slot->host.ios.vccq);
		if (err != 0)
			goto vccq_fail;
	}

	vqmmc_supply = sc->fdt_helper.vqmmc_supply;
	/*
	 * Even though we expect to find a fixed regulator in this controller
	 * family, let's play safe.
	 */
	if (vqmmc_supply != NULL) {
		err = regulator_set_voltage(vqmmc_supply, uvolt, uvolt);
		if (err != 0)
			goto vccq_fail;
	}

	return (0);

vccq_fail:
	device_printf(sc->dev, "Cannot set vqmmc to %d<->%d\n", uvolt, uvolt);
	/* Restore the previous voltage selection on failure. */
	WR4(sc, SDHCI_FSL_PROT_CTRL, val_old);

	return (err);
}

/* mmcbr get_ro: report the write-protect state via the GPIO helper. */
static int
sdhci_fsl_fdt_get_ro(device_t bus, device_t child)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(bus);
	return (sdhci_fdt_gpio_get_readonly(sc->gpio));
}

/* Card-detect state via the GPIO helper. */
static bool
sdhci_fsl_fdt_get_card_present(device_t dev, struct sdhci_slot *slot)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(dev);
	return (sdhci_fdt_gpio_get_present(sc->gpio));
}

/*
 * Convert the FDT "voltage-ranges" property (millivolt min/max pairs)
 * into SDHCI_CAN_VDD_* capability bits. Returns 0 on malformed input.
 */
static uint32_t
sdhci_fsl_fdt_vddrange_to_mask(device_t dev, uint32_t *vdd_ranges, int len)
{
	uint32_t vdd_min, vdd_max;
	uint32_t vdd_mask = 0;
	int i;

	/* Ranges are organized as pairs of values. */
	if ((len % 2) != 0) {
		device_printf(dev, "Invalid voltage range\n");
		return (0);
	}
	len = len / 2;

	for (i = 0; i < len; i++) {
		vdd_min = vdd_ranges[2 * i];
		vdd_max = vdd_ranges[2 * i + 1];

		if (vdd_min > vdd_max || vdd_min < 1650 || vdd_min > 3600 ||
		    vdd_max < 1650 || vdd_max > 3600) {
			device_printf(dev,
			    "Voltage range %d - %d is out of bounds\n",
			    vdd_min, vdd_max);
			return (0);
		}

		if (vdd_min <= 1800 && vdd_max >= 1800)
			vdd_mask |= SDHCI_CAN_VDD_180;
		if (vdd_min <= 3000 && vdd_max >= 3000)
			vdd_mask |= SDHCI_CAN_VDD_300;
		if (vdd_min <= 3300 && vdd_max >= 3300)
			vdd_mask |= SDHCI_CAN_VDD_330;
	}

	return (vdd_mask);
}

/* Read mmc and voltage-range properties from the device tree into the slot. */
static void
sdhci_fsl_fdt_of_parse(device_t dev)
{
	struct sdhci_fsl_fdt_softc *sc;
	phandle_t node;
	pcell_t *voltage_ranges;
	uint32_t vdd_mask = 0;
	ssize_t num_ranges;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	/* Call mmc_fdt_parse in order to get mmc related properties. */
	mmc_fdt_parse(dev, node, &sc->fdt_helper, &sc->slot.host);

	sc->slot.caps = sdhci_fsl_fdt_read_4(dev, &sc->slot,
	    SDHCI_CAPABILITIES) & ~(SDHCI_CAN_DO_SUSPEND);
	sc->slot.caps2 = sdhci_fsl_fdt_read_4(dev, &sc->slot,
	    SDHCI_CAPABILITIES2);

	/* Parse the "voltage-ranges" dts property. */
	num_ranges = OF_getencprop_alloc(node, "voltage-ranges",
	    (void **) &voltage_ranges);
	if (num_ranges <= 0)
		return;
	vdd_mask = sdhci_fsl_fdt_vddrange_to_mask(dev, voltage_ranges,
	    num_ranges / sizeof(uint32_t));
	OF_prop_free(voltage_ranges);

	/* Overwrite voltage caps only if we got something from dts. */
	if (vdd_mask != 0 &&
	    (vdd_mask != (sc->slot.caps & SDHCI_FSL_CAN_VDD_MASK))) {
		sc->slot.caps &= ~(SDHCI_FSL_CAN_VDD_MASK);
		sc->slot.caps |= vdd_mask;
		sc->slot.quirks |= SDHCI_QUIRK_MISSING_CAPS;
	}
}

/* Busy-wait until (reg & mask) == value; ENXIO after ~200 ms (20000 * 10us). */
static int
sdhci_fsl_poll_register(struct sdhci_fsl_fdt_softc *sc,
    uint32_t reg, uint32_t mask, int value)
{
	int retries;

	retries = SDHCI_FSL_MAX_RETRIES;
	while ((RD4(sc, reg) & mask) != value) {
		if (!retries--)
			return (ENXIO);
		DELAY(10);
	}
	return (0);
}

static int
sdhci_fsl_fdt_attach(device_t dev)
{
	struct sdhci_fsl_fdt_softc *sc;
	struct mmc_host *host;
	uint32_t val, buf_order;
	uintptr_t ocd_data;
	uint64_t clk_hz;
	phandle_t node;
	int rid, ret;
	clk_t clk;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	ocd_data = ofw_bus_search_compatible(dev,
	    sdhci_fsl_fdt_compat_data)->ocd_data;
	sc->dev = dev;
	sc->flags = 0;
	host = &sc->slot.host;
	rid = 0;

	/*
	 * LX2160A needs its own soc_data in order to apply SoC
	 * specific quirks. Since the controller is identified
	 * only with a generic compatible string we need to do this dance here.
*/
	if (ofw_bus_node_is_compatible(OF_finddevice("/"), "fsl,lx2160a"))
		sc->soc_data = &sdhci_fsl_fdt_lx2160a_soc_data;
	else
		sc->soc_data = (struct sdhci_fsl_fdt_soc_data *)ocd_data;
	sc->slot.quirks = sc->soc_data->quirks;

	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev,
		    "Could not allocate resources for controller\n");
		return (ENOMEM);
	}

	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev,
		    "Could not allocate irq resources for controller\n");
		ret = ENOMEM;
		goto err_free_mem;
	}

	ret = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, sdhci_fsl_fdt_irq, sc, &sc->irq_cookie);
	if (ret != 0) {
		device_printf(dev, "Could not setup IRQ handler\n");
		goto err_free_irq_res;
	}

	ret = clk_get_by_ofw_index(dev, node, 0, &clk);
	if (ret != 0) {
		device_printf(dev, "Parent clock not found\n");
		goto err_free_irq;
	}

	ret = clk_get_freq(clk, &clk_hz);
	if (ret != 0) {
		device_printf(dev,
		    "Could not get parent clock frequency\n");
		goto err_free_irq;
	}

	sc->baseclk_hz = clk_hz / sc->soc_data->baseclk_div;

	/* Figure out eSDHC block endianness before we touch any HW regs. */
	if (OF_hasprop(node, "little-endian")) {
		sc->read = read_le;
		sc->write = write_le;
		buf_order = SDHCI_FSL_PROT_CTRL_BYTE_NATIVE;
	} else {
		sc->read = read_be;
		sc->write = write_be;
		buf_order = SDHCI_FSL_PROT_CTRL_BYTE_SWAP;
	}

	sc->vendor_ver = (RD4(sc, SDHCI_FSL_HOST_VERSION) &
	    SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;

	sdhci_fsl_fdt_of_parse(dev);
	sc->maxclk_hz = host->f_max ? host->f_max : sc->baseclk_hz;

	/*
	 * Setting this register affects byte order in SDHCI_BUFFER only.
	 * If the eSDHC block is connected over a big-endian bus, the data
	 * read from/written to the buffer will be already byte swapped.
	 * In such a case, setting SDHCI_FSL_PROT_CTRL_BYTE_SWAP will convert
	 * the byte order again, resulting in a native byte order.
	 * The read/write callbacks accommodate for this behavior.
	 */
	val = RD4(sc, SDHCI_FSL_PROT_CTRL);
	val &= ~SDHCI_FSL_PROT_CTRL_BYTE_MASK;
	WR4(sc, SDHCI_FSL_PROT_CTRL, val | buf_order);

	/*
	 * Gate the SD clock and set its source to
	 * peripheral clock / baseclk_div. The frequency in baseclk_hz is set
	 * to match this.
	 */
	val = RD4(sc, SDHCI_CLOCK_CONTROL);
	WR4(sc, SDHCI_CLOCK_CONTROL, val & ~SDHCI_FSL_CLK_SDCLKEN);
	val = RD4(sc, SDHCI_FSL_ESDHC_CTRL);
	WR4(sc, SDHCI_FSL_ESDHC_CTRL, val | SDHCI_FSL_ESDHC_CTRL_CLK_DIV2);
	sc->slot.max_clk = sc->maxclk_hz;
	sc->gpio = sdhci_fdt_gpio_setup(dev, &sc->slot);

	/*
	 * Set the buffer watermark level to 128 words (512 bytes) for both
	 * read and write. The hardware has a restriction that when the read or
	 * write ready status is asserted, that means you can read exactly the
	 * number of words set in the watermark register before you have to
	 * re-check the status and potentially wait for more data. The main
	 * sdhci driver provides no hook for doing status checking on less than
	 * a full block boundary, so we set the watermark level to be a full
	 * block. Reads and writes where the block size is less than the
	 * watermark size will work correctly too, no need to change the
	 * watermark for different size blocks. However, 128 is the maximum
	 * allowed for the watermark, so PIO is limited to 512 byte blocks.
	 */
	WR4(sc, SDHCI_FSL_WTMK_LVL, SDHCI_FSL_WTMK_WR_512B |
	    SDHCI_FSL_WTMK_RD_512B);

	ret = sdhci_init_slot(dev, &sc->slot, 0);
	if (ret != 0)
		goto err_free_gpio;
	sc->slot_init_done = true;
	sdhci_start_slot(&sc->slot);

	return (bus_generic_attach(dev));

err_free_gpio:
	sdhci_fdt_gpio_teardown(sc->gpio);
err_free_irq:
	bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
err_free_irq_res:
	bus_free_resource(dev, SYS_RES_IRQ, sc->irq_res);
err_free_mem:
	bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
	return (ret);
}

/* Release all resources acquired in attach, in reverse order. */
static int
sdhci_fsl_fdt_detach(device_t dev)
{
	struct sdhci_fsl_fdt_softc *sc;

	sc = device_get_softc(dev);
	if (sc->slot_init_done)
		sdhci_cleanup_slot(&sc->slot);
	if (sc->gpio != NULL)
		sdhci_fdt_gpio_teardown(sc->gpio);
	if (sc->irq_cookie != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
	if (sc->irq_res != NULL)
		bus_free_resource(dev, SYS_RES_IRQ, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
	return (0);
}

static int
sdhci_fsl_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev,
	    sdhci_fsl_fdt_compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "NXP QorIQ Layerscape eSDHC controller");
	return (BUS_PROBE_DEFAULT);
}

static int
sdhci_fsl_fdt_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct sdhci_slot *slot = device_get_ivars(child);

	if (which == MMCBR_IVAR_MAX_DATA && (slot->opt & SDHCI_HAVE_DMA)) {
		/*
		 * In the absence of SDMA buffer boundary functionality,
		 * limit the maximum data length per read/write command
		 * to bounce buffer size.
*/
		*result = howmany(slot->sdma_bbufsz, 512);
		return (0);
	}
	return (sdhci_generic_read_ivar(bus, child, which, result));
}

static int
sdhci_fsl_fdt_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct sdhci_fsl_fdt_softc *sc;
	struct sdhci_slot *slot = device_get_ivars(child);
	uint32_t prescale, div;

	/* Don't depend on clock resolution limits from sdhci core. */
	if (which == MMCBR_IVAR_CLOCK) {
		if (value == 0) {
			slot->host.ios.clock = 0;
			return (0);
		}

		sc = device_get_softc(bus);

		SDHCI_FSL_FDT_CLK_DIV(sc, sc->baseclk_hz, value, prescale,
		    div);
		slot->host.ios.clock = sc->baseclk_hz / (prescale * div);

		return (0);
	}

	return (sdhci_generic_write_ivar(bus, child, which, value));
}

/* Reset hook; on RESET_ALL also clears eSDHC tuning state by hand. */
static void
sdhci_fsl_fdt_reset(device_t dev, struct sdhci_slot *slot, uint8_t mask)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t val;

	sdhci_generic_reset(dev, slot, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	sc = device_get_softc(dev);

	/* Some registers have to be cleared by hand. */
	if (slot->version >= SDHCI_SPEC_300) {
		val = RD4(sc, SDHCI_FSL_TBCTL);
		val &= ~SDHCI_FSL_TBCTL_TBEN;
		WR4(sc, SDHCI_FSL_TBCTL, val);
	}

	/*
	 * Pulse width detection is not reliable on some boards. Perform
	 * workaround by clearing register's bit according to errata.
	 */
	if (sc->soc_data->errata & SDHCI_FSL_UNRELIABLE_PULSE_DET) {
		val = RD4(sc, SDHCI_FSL_DLLCFG1);
		val &= ~SDHCI_FSL_DLLCFG1_PULSE_STRETCH;
		WR4(sc, SDHCI_FSL_DLLCFG1, val);
	}

	sc->flags = 0;
}

/* Enable or disable the eSDHC tuning block (TBCTL[TB_EN]). */
static void
sdhci_fsl_switch_tuning_block(device_t dev, bool enable)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	if (enable)
		reg |= SDHCI_FSL_TBCTL_TBEN;
	else
		reg &= ~SDHCI_FSL_TBCTL_TBEN;
	WR4(sc, SDHCI_FSL_TBCTL, reg);
}

/*
 * Software tuning fallback, used when hardware tuning fails on platforms
 * affected by tuning erratum type 1/2: program a fixed sampling window
 * (scaled by the current divider ratio) and re-run the generic tune.
 */
static int
sdhci_fsl_sw_tuning(struct sdhci_fsl_fdt_softc *sc, device_t bus,
    device_t child, bool hs400, uint32_t wnd_start, uint32_t wnd_end)
{
	uint32_t reg;
	int error;

	if (sc->soc_data->errata & SDHCI_FSL_TUNING_ERRATUM_TYPE1 ||
	    abs(wnd_start - wnd_end) <= (4 * sc->div_ratio + 2)) {
		wnd_start = 5 * sc->div_ratio;
		wnd_end = 3 * sc->div_ratio;
	} else {
		wnd_start = 8 * sc->div_ratio;
		wnd_end = 4 * sc->div_ratio;
	}

	reg = RD4(sc, SDHCI_FSL_TBPTR);
	reg &= ~SDHCI_FSL_TBPTR_WND_MASK;
	reg &= ~(SDHCI_FSL_TBPTR_WND_MASK << SDHCI_FSL_TBPTR_WND_START_SHIFT);
	reg |= wnd_start << SDHCI_FSL_TBPTR_WND_START_SHIFT;
	reg |= wnd_end;
	WR4(sc, SDHCI_FSL_TBPTR, reg);

	/*
	 * Normally those are supposed to be set in sdhci_execute_tuning.
	 * However in our case we need a small delay between setting the two.
	 */
	reg = RD4(sc, SDHCI_FSL_AUTOCERR);
	reg |= SDHCI_FSL_AUTOCERR_EXTN;
	WR4(sc, SDHCI_FSL_AUTOCERR, reg);
	DELAY(10);
	reg |= SDHCI_FSL_AUTOCERR_SMPCLKSEL;
	WR4(sc, SDHCI_FSL_AUTOCERR, reg);

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg &= ~SDHCI_FSL_TBCTL_TB_MODE_MASK;
	reg |= SDHCI_FSL_TBCTL_MODE_SW;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	error = sdhci_generic_tune(bus, child, hs400);
	if (error != 0) {
		device_printf(bus,
		    "Failed to execute generic tune while performing software tuning.\n");
	}

	return (error);
}

/* mmcbr tune: run the eSDHC tuning procedure (HW first, SW fallback). */
static int
sdhci_fsl_fdt_tune(device_t bus, device_t child, bool hs400)
{
	struct sdhci_fsl_fdt_softc *sc;
	uint32_t wnd_start, wnd_end;
	uint32_t clk_divider, reg;
	struct sdhci_slot *slot;
	int error;

	sc = device_get_softc(bus);
	slot = device_get_ivars(child);

	if (sc->slot.host.ios.timing == bus_timing_uhs_sdr50 &&
	    !(slot->opt & SDHCI_SDR50_NEEDS_TUNING))
		return (0);

	/*
	 * For tuning mode SD clock divider must be within 3 to 16.
	 * We also need to match the frequency to whatever mode is used.
	 * For that reason we're just bailing if the dividers don't match
	 * that requirement.
	 */
	clk_divider = sc->baseclk_hz / slot->clock;
	if (clk_divider < 3 || clk_divider > 16)
		return (ENXIO);

	if (hs400)
		sc->flags |= SDHCI_FSL_HS400_FLAG;

	/* Disable clock. */
	fsl_sdhc_fdt_set_clock(sc, slot, sc->sdclk_bits);

	/* Wait for PRSSTAT[SDSTB] to be set by hardware. */
	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(bus,
		    "Timeout while waiting for clock to stabilize.\n");

	/* Flush async IO. */
	reg = RD4(sc, SDHCI_FSL_ESDHC_CTRL);
	reg |= SDHCI_FSL_ESDHC_CTRL_FAF;
	WR4(sc, SDHCI_FSL_ESDHC_CTRL, reg);

	/* Wait for ESDHC[FAF] to be cleared by hardware. */
	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_ESDHC_CTRL,
	    SDHCI_FSL_ESDHC_CTRL_FAF, 0);
	if (error)
		device_printf(bus,
		    "Timeout while waiting for hardware.\n");

	/*
	 * Set TBCTL[TB_EN] register and program valid tuning mode.
	 * According to RM MODE_3 means that:
	 * "eSDHC takes care of the re-tuning during data transfer
	 * (auto re-tuning).".
	 * Tuning mode can only be changed while the clock is disabled.
	 */
	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg &= ~SDHCI_FSL_TBCTL_TB_MODE_MASK;
	reg |= SDHCI_FSL_TBCTL_TBEN | SDHCI_FSL_TBCTL_MODE_3;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	/* Enable clock. */
	fsl_sdhc_fdt_set_clock(sc, slot, SDHCI_CLOCK_CARD_EN | sc->sdclk_bits);

	/* Wait for clock to stabilize. */
	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error)
		device_printf(bus,
		    "Timeout while waiting for clock to stabilize.\n");

	/* Perform hardware tuning. */
	error = sdhci_generic_tune(bus, child, hs400);

	reg = RD4(sc, SDHCI_FSL_TBPTR);
	wnd_start = reg >> SDHCI_FSL_TBPTR_WND_START_SHIFT;
	wnd_start &= SDHCI_FSL_TBPTR_WND_MASK;
	wnd_end = reg & SDHCI_FSL_TBPTR_WND_MASK;

	/*
	 * For erratum type2 affected platforms, the controller can erroneously
	 * declare that the tuning was successful. Verify the tuning window to
	 * make sure that we're fine.
	 */
	if (error == 0 &&
	    sc->soc_data->errata & SDHCI_FSL_TUNING_ERRATUM_TYPE2 &&
	    abs(wnd_start - wnd_end) > (4 * sc->div_ratio + 2)) {
		error = EIO;
	}

	/* If hardware tuning failed, try software tuning.
*/
	if (error != 0 &&
	    (sc->soc_data->errata &
	    (SDHCI_FSL_TUNING_ERRATUM_TYPE1 |
	    SDHCI_FSL_TUNING_ERRATUM_TYPE2))) {
		error = sdhci_fsl_sw_tuning(sc, bus, child, hs400, wnd_start,
		    wnd_end);
		if (error != 0)
			device_printf(bus, "Software tuning failed.\n");
	}

	if (error != 0) {
		sdhci_fsl_switch_tuning_block(bus, false);
		return (error);
	}

	if (hs400) {
		reg = RD4(sc, SDHCI_FSL_SDTIMINGCTL);
		reg |= SDHCI_FSL_SDTIMINGCTL_FLW_CTL;
		WR4(sc, SDHCI_FSL_SDTIMINGCTL, reg);
	}

	return (0);
}

/* mmcbr retune: only full re-tuning (reset == true) is supported. */
static int
sdhci_fsl_fdt_retune(device_t bus, device_t child, bool reset)
{
	struct sdhci_slot *slot;
	struct sdhci_fsl_fdt_softc *sc;

	slot = device_get_ivars(child);
	sc = device_get_softc(bus);

	if (!(slot->opt & SDHCI_TUNING_ENABLED))
		return (0);

	/* HS400 must be tuned in HS200 mode. */
	if (slot->host.ios.timing == bus_timing_mmc_hs400)
		return (EINVAL);

	/*
	 * Only re-tuning with full reset is supported.
	 * The controller is normally put in "mode 3", which means that
	 * periodic re-tuning is done automatically. See comment in
	 * sdhci_fsl_fdt_tune for details.
	 * Because of that re-tuning should only be triggered as a result
	 * of a CRC error.
	 */
	if (!reset)
		return (ENOTSUP);

	return (sdhci_fsl_fdt_tune(bus, child,
	    sc->flags & SDHCI_FSL_HS400_FLAG));
}

/* Back out of HS400 mode, undoing sdhci_fsl_enable_hs400_mode. */
static void
sdhci_fsl_disable_hs400_mode(device_t dev, struct sdhci_fsl_fdt_softc *sc)
{
	uint32_t reg;
	int error;

	/* Check if HS400 is enabled right now. */
	reg = RD4(sc, SDHCI_FSL_TBCTL);
	if ((reg & SDHCI_FSL_TBCTL_HS400_EN) == 0)
		return;

	reg = RD4(sc, SDHCI_FSL_SDTIMINGCTL);
	reg &= ~SDHCI_FSL_SDTIMINGCTL_FLW_CTL;
	WR4(sc, SDHCI_FSL_SDTIMINGCTL, reg);

	reg = RD4(sc, SDHCI_FSL_SDCLKCTL);
	reg &= ~SDHCI_FSL_SDCLKCTL_CMD_CLK_CTL;
	WR4(sc, SDHCI_FSL_SDCLKCTL, reg);

	fsl_sdhc_fdt_set_clock(sc, &sc->slot, sc->sdclk_bits);
	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "Internal clock never stabilized.\n");

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg &= ~SDHCI_FSL_TBCTL_HS400_EN;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	fsl_sdhc_fdt_set_clock(sc, &sc->slot, SDHCI_CLOCK_CARD_EN |
	    sc->sdclk_bits);
	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "Internal clock never stabilized.\n");

	reg = RD4(sc, SDHCI_FSL_DLLCFG0);
	reg &= ~(SDHCI_FSL_DLLCFG0_EN |
	    SDHCI_FSL_DLLCFG0_FREQ_SEL);
	WR4(sc, SDHCI_FSL_DLLCFG0, reg);

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg &= ~SDHCI_FSL_TBCTL_HS400_WND_ADJ;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	sdhci_fsl_switch_tuning_block(dev, false);
}

/* Execute the eSDHC HS400 enable sequence (clock gating, DLL, window adj). */
static void
sdhci_fsl_enable_hs400_mode(device_t dev, struct sdhci_slot *slot,
    struct sdhci_fsl_fdt_softc *sc)
{
	uint32_t reg;
	int error;

	sdhci_fsl_switch_tuning_block(dev, true);
	fsl_sdhc_fdt_set_clock(sc, slot, sc->sdclk_bits);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "Timeout while waiting for clock to stabilize.\n");

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg |= SDHCI_FSL_TBCTL_HS400_EN;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	reg = RD4(sc, SDHCI_FSL_SDCLKCTL);
	reg |= SDHCI_FSL_SDCLKCTL_CMD_CLK_CTL;
	WR4(sc, SDHCI_FSL_SDCLKCTL, reg);

	fsl_sdhc_fdt_set_clock(sc, slot, SDHCI_CLOCK_CARD_EN |
	    sc->sdclk_bits);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "Timeout while waiting for clock to stabilize.\n");

	reg = RD4(sc, SDHCI_FSL_DLLCFG0);
	reg |= SDHCI_FSL_DLLCFG0_EN | SDHCI_FSL_DLLCFG0_RESET |
	    SDHCI_FSL_DLLCFG0_FREQ_SEL;
	WR4(sc, SDHCI_FSL_DLLCFG0, reg);
	/*
	 * The reset bit is not a self clearing one.
	 * Give it some time and clear it manually.
	 */
	DELAY(100);
	reg &= ~SDHCI_FSL_DLLCFG0_RESET;
	WR4(sc, SDHCI_FSL_DLLCFG0, reg);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_DLLSTAT0,
	    SDHCI_FSL_DLLSTAT0_SLV_STS, SDHCI_FSL_DLLSTAT0_SLV_STS);
	if (error != 0)
		device_printf(dev,
		    "Timeout while waiting for DLL0.\n");

	reg = RD4(sc, SDHCI_FSL_TBCTL);
	reg |= SDHCI_FSL_TBCTL_HS400_WND_ADJ;
	WR4(sc, SDHCI_FSL_TBCTL, reg);

	fsl_sdhc_fdt_set_clock(sc, slot, sc->sdclk_bits);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "timeout while waiting for clock to stabilize.\n");

	reg = RD4(sc, SDHCI_FSL_ESDHC_CTRL);
	reg |= SDHCI_FSL_ESDHC_CTRL_FAF;
	WR4(sc, SDHCI_FSL_ESDHC_CTRL, reg);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_ESDHC_CTRL,
	    SDHCI_FSL_ESDHC_CTRL_FAF, 0);
	if (error != 0)
		device_printf(dev,
		    "Timeout while waiting for hardware.\n");

	fsl_sdhc_fdt_set_clock(sc, slot, SDHCI_CLOCK_CARD_EN |
	    sc->sdclk_bits);

	error = sdhci_fsl_poll_register(sc, SDHCI_FSL_PRES_STATE,
	    SDHCI_FSL_PRES_SDSTB, SDHCI_FSL_PRES_SDSTB);
	if (error != 0)
		device_printf(dev,
		    "Timeout while waiting for clock to stabilize.\n");
}

/* Apply UHS timing; HS400 enters/leaves via its dedicated sequences. */
static void
sdhci_fsl_fdt_set_uhs_timing(device_t dev, struct sdhci_slot *slot)
{
	struct sdhci_fsl_fdt_softc *sc;
	const struct mmc_ios *ios;
	uint32_t mode, reg;

	sc = device_get_softc(dev);
	ios = &slot->host.ios;
	mode = 0;

	/*
	 * When we switch to HS400 this function is called twice.
	 * First after the timing is set, and then after the clock
	 * is changed to the target frequency.
	 * The controller can be switched to HS400 only after the latter
	 * is done.
*/ if (slot->host.ios.timing == bus_timing_mmc_hs400 && ios->clock > SD_SDR50_MAX) sdhci_fsl_enable_hs400_mode(dev, slot, sc); else if (slot->host.ios.timing < bus_timing_mmc_hs400) { sdhci_fsl_disable_hs400_mode(dev, sc); /* * Switching to HS400 requires a custom procedure executed in * sdhci_fsl_enable_hs400_mode in case above. * For all other modes we just need to set the corresponding flag. */ reg = RD4(sc, SDHCI_FSL_AUTOCERR); reg &= ~SDHCI_FSL_AUTOCERR_UHMS; if (ios->clock > SD_SDR50_MAX) mode = SDHCI_CTRL2_UHS_SDR104; else if (ios->clock > SD_SDR25_MAX) mode = SDHCI_CTRL2_UHS_SDR50; else if (ios->clock > SD_SDR12_MAX) { if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52) mode = SDHCI_CTRL2_UHS_DDR50; else mode = SDHCI_CTRL2_UHS_SDR25; } else if (ios->clock > SD_MMC_CARD_ID_FREQUENCY) mode = SDHCI_CTRL2_UHS_SDR12; reg |= mode << SDHCI_FSL_AUTOCERR_UHMS_SHIFT; WR4(sc, SDHCI_FSL_AUTOCERR, reg); } } static const device_method_t sdhci_fsl_fdt_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, sdhci_fsl_fdt_probe), DEVMETHOD(device_attach, sdhci_fsl_fdt_attach), DEVMETHOD(device_detach, sdhci_fsl_fdt_detach), /* Bus interface. */ DEVMETHOD(bus_read_ivar, sdhci_fsl_fdt_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_fsl_fdt_write_ivar), /* MMC bridge interface. */ DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, sdhci_fsl_fdt_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), DEVMETHOD(mmcbr_switch_vccq, sdhci_fsl_fdt_switch_vccq), DEVMETHOD(mmcbr_update_ios, sdhci_fsl_fdt_update_ios), DEVMETHOD(mmcbr_tune, sdhci_fsl_fdt_tune), DEVMETHOD(mmcbr_retune, sdhci_fsl_fdt_retune), /* SDHCI accessors. 
*/ DEVMETHOD(sdhci_read_1, sdhci_fsl_fdt_read_1), DEVMETHOD(sdhci_read_2, sdhci_fsl_fdt_read_2), DEVMETHOD(sdhci_read_4, sdhci_fsl_fdt_read_4), DEVMETHOD(sdhci_read_multi_4, sdhci_fsl_fdt_read_multi_4), DEVMETHOD(sdhci_write_1, sdhci_fsl_fdt_write_1), DEVMETHOD(sdhci_write_2, sdhci_fsl_fdt_write_2), DEVMETHOD(sdhci_write_4, sdhci_fsl_fdt_write_4), DEVMETHOD(sdhci_write_multi_4, sdhci_fsl_fdt_write_multi_4), DEVMETHOD(sdhci_get_card_present, sdhci_fsl_fdt_get_card_present), DEVMETHOD(sdhci_reset, sdhci_fsl_fdt_reset), DEVMETHOD(sdhci_set_uhs_timing, sdhci_fsl_fdt_set_uhs_timing), DEVMETHOD_END }; static driver_t sdhci_fsl_fdt_driver = { "sdhci_fsl_fdt", sdhci_fsl_fdt_methods, sizeof(struct sdhci_fsl_fdt_softc), }; DRIVER_MODULE(sdhci_fsl_fdt, simplebus, sdhci_fsl_fdt_driver, NULL, NULL); SDHCI_DEPEND(sdhci_fsl_fdt); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_fsl_fdt); #endif diff --git a/sys/dev/spibus/controller/allwinner/aw_spi.c b/sys/dev/spibus/controller/allwinner/aw_spi.c index 3dcde0e4225e..fe6f5c21fcd7 100644 --- a/sys/dev/spibus/controller/allwinner/aw_spi.c +++ b/sys/dev/spibus/controller/allwinner/aw_spi.c @@ -1,610 +1,610 @@ /*- * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "spibus_if.h" #define AW_SPI_GCR 0x04 /* Global Control Register */ #define AW_SPI_GCR_EN (1 << 0) /* ENable */ #define AW_SPI_GCR_MODE_MASTER (1 << 1) /* 1 = Master, 0 = Slave */ #define AW_SPI_GCR_TP_EN (1 << 7) /* 1 = Stop transmit when FIFO is full */ #define AW_SPI_GCR_SRST (1 << 31) /* Soft Reset */ #define AW_SPI_TCR 0x08 /* Transfer Control register */ #define AW_SPI_TCR_XCH (1 << 31) /* Initiate transfer */ #define AW_SPI_TCR_SDDM (1 << 14) /* Sending Delay Data Mode */ #define AW_SPI_TCR_SDM (1 << 13) /* Master Sample Data Mode */ #define AW_SPI_TCR_FBS (1 << 12) /* First Transmit Bit Select (1 == LSB) */ #define AW_SPI_TCR_SDC (1 << 11) /* Master Sample Data Control */ #define AW_SPI_TCR_RPSM (1 << 10) /* Rapid Mode Select */ #define AW_SPI_TCR_DDB (1 << 9) /* Dummy Burst Type */ #define AW_SPI_TCR_SSSEL_MASK 0x30 /* Chip select */ #define AW_SPI_TCR_SSSEL_SHIFT 4 #define AW_SPI_TCR_SS_LEVEL (1 << 7) /* 1 == CS High */ #define AW_SPI_TCR_SS_OWNER (1 << 6) /* 1 == Software controlled */ #define AW_SPI_TCR_SPOL (1 << 2) /* 1 == Active low */ #define AW_SPI_TCR_CPOL (1 << 1) /* 1 == Active low */ #define AW_SPI_TCR_CPHA (1 << 0) /* 1 == Phase 1 */ #define AW_SPI_IER 0x10 /* Interrupt Control Register */ #define AW_SPI_IER_SS (1 << 13) /* Chip select went from valid to invalid */ #define 
AW_SPI_IER_TC (1 << 12) /* Transfer complete */ #define AW_SPI_IER_TF_UDR (1 << 11) /* TXFIFO underrun */ #define AW_SPI_IER_TF_OVF (1 << 10) /* TXFIFO overrun */ #define AW_SPI_IER_RF_UDR (1 << 9) /* RXFIFO underrun */ #define AW_SPI_IER_RF_OVF (1 << 8) /* RXFIFO overrun */ #define AW_SPI_IER_TF_FULL (1 << 6) /* TXFIFO Full */ #define AW_SPI_IER_TF_EMP (1 << 5) /* TXFIFO Empty */ #define AW_SPI_IER_TF_ERQ (1 << 4) /* TXFIFO Empty Request */ #define AW_SPI_IER_RF_FULL (1 << 2) /* RXFIFO Full */ #define AW_SPI_IER_RF_EMP (1 << 1) /* RXFIFO Empty */ #define AW_SPI_IER_RF_RDY (1 << 0) /* RXFIFO Ready Request */ #define AW_SPI_ISR 0x14 /* Interrupt Status Register */ #define AW_SPI_FCR 0x18 /* FIFO Control Register */ #define AW_SPI_FCR_TX_RST (1 << 31) /* Reset TX FIFO */ #define AW_SPI_FCR_TX_TRIG_MASK 0xFF0000 /* TX FIFO Trigger level */ #define AW_SPI_FCR_TX_TRIG_SHIFT 16 #define AW_SPI_FCR_RX_RST (1 << 15) /* Reset RX FIFO */ #define AW_SPI_FCR_RX_TRIG_MASK 0xFF /* RX FIFO Trigger level */ #define AW_SPI_FCR_RX_TRIG_SHIFT 0 #define AW_SPI_FSR 0x1C /* FIFO Status Register */ #define AW_SPI_FSR_TB_WR (1 << 31) #define AW_SPI_FSR_TB_CNT_MASK 0x70000000 #define AW_SPI_FSR_TB_CNT_SHIFT 28 #define AW_SPI_FSR_TF_CNT_MASK 0xFF0000 #define AW_SPI_FSR_TF_CNT_SHIFT 16 #define AW_SPI_FSR_RB_WR (1 << 15) #define AW_SPI_FSR_RB_CNT_MASK 0x7000 #define AW_SPI_FSR_RB_CNT_SHIFT 12 #define AW_SPI_FSR_RF_CNT_MASK 0xFF #define AW_SPI_FSR_RF_CNT_SHIFT 0 #define AW_SPI_WCR 0x20 /* Wait Clock Counter Register */ #define AW_SPI_CCR 0x24 /* Clock Rate Control Register */ #define AW_SPI_CCR_DRS (1 << 12) /* Clock divider select */ #define AW_SPI_CCR_CDR1_MASK 0xF00 #define AW_SPI_CCR_CDR1_SHIFT 8 #define AW_SPI_CCR_CDR2_MASK 0xFF #define AW_SPI_CCR_CDR2_SHIFT 0 #define AW_SPI_MBC 0x30 /* Burst Counter Register */ #define AW_SPI_MTC 0x34 /* Transmit Counter Register */ #define AW_SPI_BCC 0x38 /* Burst Control Register */ #define AW_SPI_MDMA_CTL 0x88 /* Normal DMA Control Register */ #define 
AW_SPI_TXD 0x200 /* TX Data Register */ #define AW_SPI_RDX 0x300 /* RX Data Register */ #define AW_SPI_MAX_CS 4 #define AW_SPI_FIFO_SIZE 64 static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-h3-spi", 1 }, { NULL, 0 } }; static struct resource_spec aw_spi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct aw_spi_softc { device_t dev; device_t spibus; struct resource *res[2]; struct mtx mtx; clk_t clk_ahb; clk_t clk_mod; uint64_t mod_freq; hwreset_t rst_ahb; void * intrhand; int transfer; uint8_t *rxbuf; uint32_t rxcnt; uint8_t *txbuf; uint32_t txcnt; uint32_t txlen; uint32_t rxlen; }; #define AW_SPI_LOCK(sc) mtx_lock(&(sc)->mtx) #define AW_SPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define AW_SPI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AW_SPI_READ_1(sc, reg) bus_read_1((sc)->res[0], (reg)) #define AW_SPI_WRITE_1(sc, reg, val) bus_write_1((sc)->res[0], (reg), (val)) #define AW_SPI_READ_4(sc, reg) bus_read_4((sc)->res[0], (reg)) #define AW_SPI_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int aw_spi_probe(device_t dev); static int aw_spi_attach(device_t dev); static int aw_spi_detach(device_t dev); static int aw_spi_intr(void *arg); static int aw_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner SPI"); return (BUS_PROBE_DEFAULT); } static int aw_spi_attach(device_t dev) { struct aw_spi_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, aw_spi_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, aw_spi_intr, NULL, sc, &sc->intrhand)) { bus_release_resources(dev, aw_spi_spec, sc->res); device_printf(dev, 
"cannot setup interrupt handler\n"); return (ENXIO); } /* De-assert reset */ if (hwreset_get_by_ofw_idx(dev, 0, 0, &sc->rst_ahb) == 0) { error = hwreset_deassert(sc->rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } /* Activate the module clock. */ error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "mod", &sc->clk_mod); if (error != 0) { device_printf(dev, "cannot get mod clock\n"); goto fail; } error = clk_enable(sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_enable(sc->clk_mod); if (error != 0) { device_printf(dev, "cannot enable mod clock\n"); goto fail; } sc->spibus = device_add_child(dev, "spibus", -1); return (bus_generic_attach(dev)); fail: aw_spi_detach(dev); return (error); } static int aw_spi_detach(device_t dev) { struct aw_spi_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); if (sc->spibus != NULL) device_delete_child(dev, sc->spibus); if (sc->clk_mod != NULL) clk_release(sc->clk_mod); if (sc->clk_ahb) clk_release(sc->clk_ahb); if (sc->rst_ahb) hwreset_assert(sc->rst_ahb); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, aw_spi_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static phandle_t aw_spi_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static void aw_spi_setup_mode(struct aw_spi_softc *sc, uint32_t mode) { uint32_t reg; /* We only support master mode */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg |= AW_SPI_GCR_MODE_MASTER; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); /* Setup the modes */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); if (mode & SPIBUS_MODE_CPHA) reg |= AW_SPI_TCR_CPHA; if (mode & SPIBUS_MODE_CPOL) reg |= AW_SPI_TCR_CPOL; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); } static void aw_spi_setup_cs(struct aw_spi_softc *sc, uint32_t cs, 
bool low) { uint32_t reg; /* Setup CS */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); reg &= ~(AW_SPI_TCR_SSSEL_MASK); reg |= cs << AW_SPI_TCR_SSSEL_SHIFT; reg |= AW_SPI_TCR_SS_OWNER; if (low) reg &= ~(AW_SPI_TCR_SS_LEVEL); else reg |= AW_SPI_TCR_SS_LEVEL; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); } static uint64_t aw_spi_clock_test_cdr1(struct aw_spi_softc *sc, uint64_t clock, uint32_t *ccr) { uint64_t cur, best = 0; int i, max, best_div; max = AW_SPI_CCR_CDR1_MASK >> AW_SPI_CCR_CDR1_SHIFT; for (i = 0; i < max; i++) { cur = sc->mod_freq / (1 << i); if ((clock - cur) < (clock - best)) { best = cur; best_div = i; } } *ccr = (best_div << AW_SPI_CCR_CDR1_SHIFT); return (best); } static uint64_t aw_spi_clock_test_cdr2(struct aw_spi_softc *sc, uint64_t clock, uint32_t *ccr) { uint64_t cur, best = 0; int i, max, best_div; max = ((AW_SPI_CCR_CDR2_MASK) >> AW_SPI_CCR_CDR2_SHIFT); for (i = 0; i < max; i++) { cur = sc->mod_freq / (2 * i + 1); if ((clock - cur) < (clock - best)) { best = cur; best_div = i; } } *ccr = AW_SPI_CCR_DRS | (best_div << AW_SPI_CCR_CDR2_SHIFT); return (best); } static void aw_spi_setup_clock(struct aw_spi_softc *sc, uint64_t clock) { uint64_t best_ccr1, best_ccr2; uint32_t ccr, ccr1, ccr2; best_ccr1 = aw_spi_clock_test_cdr1(sc, clock, &ccr1); best_ccr2 = aw_spi_clock_test_cdr2(sc, clock, &ccr2); if (best_ccr1 == clock) { ccr = ccr1; } else if (best_ccr2 == clock) { ccr = ccr2; } else { if ((clock - best_ccr1) < (clock - best_ccr2)) ccr = ccr1; else ccr = ccr2; } AW_SPI_WRITE_4(sc, AW_SPI_CCR, ccr); } static inline void aw_spi_fill_txfifo(struct aw_spi_softc *sc) { uint32_t reg, txcnt; int i; if (sc->txcnt == sc->txlen) return; reg = AW_SPI_READ_4(sc, AW_SPI_FSR); reg &= AW_SPI_FSR_TF_CNT_MASK; txcnt = reg >> AW_SPI_FSR_TF_CNT_SHIFT; for (i = 0; i < (AW_SPI_FIFO_SIZE - txcnt); i++) { AW_SPI_WRITE_1(sc, AW_SPI_TXD, sc->txbuf[sc->txcnt++]); if (sc->txcnt == sc->txlen) break; } return; } static inline void aw_spi_read_rxfifo(struct aw_spi_softc *sc) { uint32_t reg; 
uint8_t val; int i; if (sc->rxcnt == sc->rxlen) return; reg = AW_SPI_READ_4(sc, AW_SPI_FSR); reg = (reg & AW_SPI_FSR_RF_CNT_MASK) >> AW_SPI_FSR_RF_CNT_SHIFT; for (i = 0; i < reg; i++) { val = AW_SPI_READ_1(sc, AW_SPI_RDX); if (sc->rxcnt < sc->rxlen) sc->rxbuf[sc->rxcnt++] = val; } } static int aw_spi_intr(void *arg) { struct aw_spi_softc *sc; uint32_t intr; sc = (struct aw_spi_softc *)arg; intr = AW_SPI_READ_4(sc, AW_SPI_ISR); if (intr & AW_SPI_IER_RF_RDY) aw_spi_read_rxfifo(sc); if (intr & AW_SPI_IER_TF_ERQ) { aw_spi_fill_txfifo(sc); /* * If we don't have anything else to write * disable TXFifo interrupts */ if (sc->txcnt == sc->txlen) AW_SPI_WRITE_4(sc, AW_SPI_IER, AW_SPI_IER_TC | AW_SPI_IER_RF_RDY); } if (intr & AW_SPI_IER_TC) { /* read the rest of the data from the fifo */ aw_spi_read_rxfifo(sc); /* Disable the interrupts */ AW_SPI_WRITE_4(sc, AW_SPI_IER, 0); sc->transfer = 0; wakeup(sc); } /* Clear Interrupts */ AW_SPI_WRITE_4(sc, AW_SPI_ISR, intr); return (intr != 0 ? FILTER_HANDLED : FILTER_STRAY); } static int aw_spi_xfer(struct aw_spi_softc *sc, void *rxbuf, void *txbuf, uint32_t txlen, uint32_t rxlen) { uint32_t reg; int error = 0, timeout; sc->rxbuf = rxbuf; sc->rxcnt = 0; sc->txbuf = txbuf; sc->txcnt = 0; sc->txlen = txlen; sc->rxlen = rxlen; /* Reset the FIFOs */ AW_SPI_WRITE_4(sc, AW_SPI_FCR, AW_SPI_FCR_TX_RST | AW_SPI_FCR_RX_RST); for (timeout = 1000; timeout > 0; timeout--) { reg = AW_SPI_READ_4(sc, AW_SPI_FCR); if (reg == 0) break; } if (timeout == 0) { device_printf(sc->dev, "Cannot reset the FIFOs\n"); return (EIO); } /* * Set the TX FIFO threshold to 3/4-th the size and * the RX FIFO one to 1/4-th. 
*/ AW_SPI_WRITE_4(sc, AW_SPI_FCR, ((3 * AW_SPI_FIFO_SIZE / 4) << AW_SPI_FCR_TX_TRIG_SHIFT) | ((AW_SPI_FIFO_SIZE / 4) << AW_SPI_FCR_RX_TRIG_SHIFT)); /* Write the counters */ AW_SPI_WRITE_4(sc, AW_SPI_MBC, txlen); AW_SPI_WRITE_4(sc, AW_SPI_MTC, txlen); AW_SPI_WRITE_4(sc, AW_SPI_BCC, txlen); /* First fill */ aw_spi_fill_txfifo(sc); /* Start transmit */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); reg |= AW_SPI_TCR_XCH; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); /* * Enable interrupts for : * Transmit complete * TX Fifo is below its trigger threshold * RX Fifo is above its trigger threshold */ AW_SPI_WRITE_4(sc, AW_SPI_IER, AW_SPI_IER_TC | AW_SPI_IER_TF_ERQ | AW_SPI_IER_RF_RDY); sc->transfer = 1; while (error == 0 && sc->transfer != 0) error = msleep(sc, &sc->mtx, 0, "aw_spi", 10 * hz); return (0); } static int aw_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct aw_spi_softc *sc; uint32_t cs, mode, clock, reg; int err = 0; sc = device_get_softc(dev); spibus_get_cs(child, &cs); spibus_get_clock(child, &clock); spibus_get_mode(child, &mode); /* The minimum divider is 2 so set the clock at twice the needed speed */ clk_set_freq(sc->clk_mod, 2 * clock, CLK_SET_ROUND_DOWN); clk_get_freq(sc->clk_mod, &sc->mod_freq); if (cs >= AW_SPI_MAX_CS) { device_printf(dev, "Invalid cs %d\n", cs); return (EINVAL); } mtx_lock(&sc->mtx); /* Enable and reset the module */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg |= AW_SPI_GCR_EN | AW_SPI_GCR_SRST; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); /* Setup clock, CS and mode */ aw_spi_setup_clock(sc, clock); aw_spi_setup_mode(sc, mode); if (cs & SPIBUS_CS_HIGH) aw_spi_setup_cs(sc, cs, false); else aw_spi_setup_cs(sc, cs, true); /* xfer */ err = 0; if (cmd->tx_cmd_sz > 0) err = aw_spi_xfer(sc, cmd->rx_cmd, cmd->tx_cmd, cmd->tx_cmd_sz, cmd->rx_cmd_sz); if (cmd->tx_data_sz > 0 && err == 0) err = aw_spi_xfer(sc, cmd->rx_data, cmd->tx_data, cmd->tx_data_sz, cmd->rx_data_sz); if (cs & SPIBUS_CS_HIGH) aw_spi_setup_cs(sc, cs, true); else 
aw_spi_setup_cs(sc, cs, false); /* Disable the module */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg &= ~AW_SPI_GCR_EN; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); mtx_unlock(&sc->mtx); return (err); } static device_method_t aw_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_spi_probe), DEVMETHOD(device_attach, aw_spi_attach), DEVMETHOD(device_detach, aw_spi_detach), /* spibus_if */ DEVMETHOD(spibus_transfer, aw_spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, aw_spi_get_node), DEVMETHOD_END }; static driver_t aw_spi_driver = { "aw_spi", aw_spi_methods, sizeof(struct aw_spi_softc), }; DRIVER_MODULE(aw_spi, simplebus, aw_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, aw_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(aw_spi, ofw_spibus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/spibus/controller/rockchip/rk_spi.c b/sys/dev/spibus/controller/rockchip/rk_spi.c index c7a79c4e3cad..42f12e6ddaee 100644 --- a/sys/dev/spibus/controller/rockchip/rk_spi.c +++ b/sys/dev/spibus/controller/rockchip/rk_spi.c @@ -1,476 +1,476 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "spibus_if.h" #define RK_SPI_CTRLR0 0x0000 #define CTRLR0_OPM_MASTER (0 << 20) #define CTRLR0_XFM_TR (0 << 18) #define CTRLR0_FRF_MOTO (0 << 16) #define CTRLR0_BHT_8BIT (1 << 13) #define CTRLR0_EM_BIG (1 << 11) #define CTRLR0_SSD_ONE (1 << 10) #define CTRLR0_SCPOL (1 << 7) #define CTRLR0_SCPH (1 << 6) #define CTRLR0_DFS_8BIT (1 << 0) #define RK_SPI_CTRLR1 0x0004 #define RK_SPI_ENR 0x0008 #define RK_SPI_SER 0x000c #define RK_SPI_BAUDR 0x0010 #define RK_SPI_TXFTLR 0x0014 #define RK_SPI_RXFTLR 0x0018 #define RK_SPI_TXFLR 0x001c #define RK_SPI_RXFLR 0x0020 #define RK_SPI_SR 0x0024 #define SR_BUSY (1 << 0) #define RK_SPI_IPR 0x0028 #define RK_SPI_IMR 0x002c #define IMR_RFFIM (1 << 4) #define IMR_TFEIM (1 << 0) #define RK_SPI_ISR 0x0030 #define ISR_RFFIS (1 << 4) #define ISR_TFEIS (1 << 0) #define RK_SPI_RISR 0x0034 #define RK_SPI_ICR 0x0038 #define RK_SPI_DMACR 0x003c #define RK_SPI_DMATDLR 0x0040 #define RK_SPI_DMARDLR 0x0044 #define RK_SPI_TXDR 0x0400 #define RK_SPI_RXDR 0x0800 #define CS_MAX 1 static struct ofw_compat_data compat_data[] = { { "rockchip,rk3328-spi", 1 }, { "rockchip,rk3399-spi", 1 }, { "rockchip,rk3568-spi", 1 }, { NULL, 0 } }; static struct resource_spec rk_spi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct rk_spi_softc { 
device_t dev; device_t spibus; struct resource *res[2]; struct mtx mtx; clk_t clk_apb; clk_t clk_spi; void * intrhand; int transfer; uint32_t fifo_size; uint64_t max_freq; uint32_t intreg; uint8_t *rxbuf; uint32_t rxidx; uint8_t *txbuf; uint32_t txidx; uint32_t txlen; uint32_t rxlen; }; #define RK_SPI_LOCK(sc) mtx_lock(&(sc)->mtx) #define RK_SPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RK_SPI_READ_4(sc, reg) bus_read_4((sc)->res[0], (reg)) #define RK_SPI_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int rk_spi_probe(device_t dev); static int rk_spi_attach(device_t dev); static int rk_spi_detach(device_t dev); static void rk_spi_intr(void *arg); static void rk_spi_enable_chip(struct rk_spi_softc *sc, int enable) { RK_SPI_WRITE_4(sc, RK_SPI_ENR, enable ? 1 : 0); } static int rk_spi_set_cs(struct rk_spi_softc *sc, uint32_t cs, bool active) { uint32_t reg; if (cs & SPIBUS_CS_HIGH) { device_printf(sc->dev, "SPIBUS_CS_HIGH is not supported\n"); return (EINVAL); } if (cs > CS_MAX) return (EINVAL); reg = RK_SPI_READ_4(sc, RK_SPI_SER); if (active) reg |= (1 << cs); else reg &= ~(1 << cs); RK_SPI_WRITE_4(sc, RK_SPI_SER, reg); return (0); } static void rk_spi_hw_setup(struct rk_spi_softc *sc, uint32_t mode, uint32_t freq) { uint32_t cr0; uint32_t div; cr0 = CTRLR0_OPM_MASTER | CTRLR0_XFM_TR | CTRLR0_FRF_MOTO | CTRLR0_BHT_8BIT | CTRLR0_EM_BIG | CTRLR0_SSD_ONE | CTRLR0_DFS_8BIT; if (mode & SPIBUS_MODE_CPHA) cr0 |= CTRLR0_SCPH; if (mode & SPIBUS_MODE_CPOL) cr0 |= CTRLR0_SCPOL; /* minimum divider is 2 */ if (sc->max_freq < freq*2) { clk_set_freq(sc->clk_spi, 2 * freq, CLK_SET_ROUND_DOWN); clk_get_freq(sc->clk_spi, &sc->max_freq); } div = ((sc->max_freq + freq - 1) / freq); div = (div + 1) & 0xfffe; RK_SPI_WRITE_4(sc, RK_SPI_BAUDR, div); RK_SPI_WRITE_4(sc, RK_SPI_CTRLR0, cr0); } static uint32_t rk_spi_fifo_size(struct rk_spi_softc *sc) { uint32_t txftlr, reg; for (txftlr = 2; txftlr < 32; txftlr++) { RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, txftlr); reg = 
RK_SPI_READ_4(sc, RK_SPI_TXFTLR); if (reg != txftlr) break; } RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, 0); if (txftlr == 31) return 0; return txftlr; } static void rk_spi_empty_rxfifo(struct rk_spi_softc *sc) { uint32_t rxlevel; rxlevel = RK_SPI_READ_4(sc, RK_SPI_RXFLR); while (sc->rxidx < sc->rxlen && (rxlevel-- > 0)) { sc->rxbuf[sc->rxidx++] = (uint8_t)RK_SPI_READ_4(sc, RK_SPI_RXDR); } } static void rk_spi_fill_txfifo(struct rk_spi_softc *sc) { uint32_t txlevel; txlevel = RK_SPI_READ_4(sc, RK_SPI_TXFLR); while (sc->txidx < sc->txlen && txlevel < sc->fifo_size) { RK_SPI_WRITE_4(sc, RK_SPI_TXDR, sc->txbuf[sc->txidx++]); txlevel++; } if (sc->txidx != sc->txlen) sc->intreg |= (IMR_TFEIM | IMR_RFFIM); } static int rk_spi_xfer_buf(struct rk_spi_softc *sc, void *rxbuf, void *txbuf, uint32_t len) { int err; if (len == 0) return (0); sc->rxbuf = rxbuf; sc->rxlen = len; sc->rxidx = 0; sc->txbuf = txbuf; sc->txlen = len; sc->txidx = 0; sc->intreg = 0; rk_spi_fill_txfifo(sc); RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg); err = 0; while (err == 0 && sc->intreg != 0) err = msleep(sc, &sc->mtx, 0, "rk_spi", 10 * hz); while (err == 0 && sc->rxidx != sc->txidx) { /* read residual data from RX fifo */ rk_spi_empty_rxfifo(sc); } if (sc->rxidx != sc->rxlen || sc->txidx != sc->txlen) err = EIO; return (err); } static int rk_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Rockchip SPI"); return (BUS_PROBE_DEFAULT); } static int rk_spi_attach(device_t dev) { struct rk_spi_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, rk_spi_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_spi_intr, sc, &sc->intrhand)) { bus_release_resources(dev, 
rk_spi_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } /* Activate the module clock. */ error = clk_get_by_ofw_name(dev, 0, "apb_pclk", &sc->clk_apb); if (error != 0) { device_printf(dev, "cannot get apb_pclk clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "spiclk", &sc->clk_spi); if (error != 0) { device_printf(dev, "cannot get spiclk clock\n"); goto fail; } error = clk_enable(sc->clk_apb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_enable(sc->clk_spi); if (error != 0) { device_printf(dev, "cannot enable spiclk clock\n"); goto fail; } clk_get_freq(sc->clk_spi, &sc->max_freq); sc->fifo_size = rk_spi_fifo_size(sc); if (sc->fifo_size == 0) { device_printf(dev, "failed to get fifo size\n"); goto fail; } sc->spibus = device_add_child(dev, "spibus", -1); RK_SPI_WRITE_4(sc, RK_SPI_IMR, 0); RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, sc->fifo_size/2 - 1); RK_SPI_WRITE_4(sc, RK_SPI_RXFTLR, sc->fifo_size/2 - 1); return (bus_generic_attach(dev)); fail: rk_spi_detach(dev); return (error); } static int rk_spi_detach(device_t dev) { struct rk_spi_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); if (sc->spibus != NULL) device_delete_child(dev, sc->spibus); if (sc->clk_spi != NULL) clk_release(sc->clk_spi); if (sc->clk_apb) clk_release(sc->clk_apb); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, rk_spi_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static void rk_spi_intr(void *arg) { struct rk_spi_softc *sc; uint32_t intreg, isr; sc = arg; RK_SPI_LOCK(sc); intreg = RK_SPI_READ_4(sc, RK_SPI_IMR); isr = RK_SPI_READ_4(sc, RK_SPI_ISR); RK_SPI_WRITE_4(sc, RK_SPI_ICR, isr); if (isr & ISR_RFFIS) rk_spi_empty_rxfifo(sc); if (isr & ISR_TFEIS) rk_spi_fill_txfifo(sc); /* no bytes left, disable interrupt */ if (sc->txidx == sc->txlen) { sc->intreg = 0; wakeup(sc); } if (sc->intreg != intreg) { 
(void)RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg); (void)RK_SPI_READ_4(sc, RK_SPI_IMR); } RK_SPI_UNLOCK(sc); } static phandle_t rk_spi_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static int rk_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct rk_spi_softc *sc; uint32_t cs, mode, clock; int err = 0; sc = device_get_softc(dev); spibus_get_cs(child, &cs); spibus_get_clock(child, &clock); spibus_get_mode(child, &mode); RK_SPI_LOCK(sc); rk_spi_hw_setup(sc, mode, clock); rk_spi_enable_chip(sc, 1); err = rk_spi_set_cs(sc, cs, true); if (err != 0) { rk_spi_enable_chip(sc, 0); RK_SPI_UNLOCK(sc); return (err); } /* Transfer command then data bytes. */ err = 0; if (cmd->tx_cmd_sz > 0) err = rk_spi_xfer_buf(sc, cmd->rx_cmd, cmd->tx_cmd, cmd->tx_cmd_sz); if (cmd->tx_data_sz > 0 && err == 0) err = rk_spi_xfer_buf(sc, cmd->rx_data, cmd->tx_data, cmd->tx_data_sz); rk_spi_set_cs(sc, cs, false); rk_spi_enable_chip(sc, 0); RK_SPI_UNLOCK(sc); return (err); } static device_method_t rk_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_spi_probe), DEVMETHOD(device_attach, rk_spi_attach), DEVMETHOD(device_detach, rk_spi_detach), /* spibus_if */ DEVMETHOD(spibus_transfer, rk_spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, rk_spi_get_node), DEVMETHOD_END }; static driver_t rk_spi_driver = { "spi", rk_spi_methods, sizeof(struct rk_spi_softc), }; DRIVER_MODULE(rk_spi, simplebus, rk_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, rk_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(rk_spi, ofw_spibus, 1, 1, 1); OFWBUS_PNP_INFO(compat_data); diff --git a/sys/dev/uart/uart_dev_imx.c b/sys/dev/uart/uart_dev_imx.c index 698497644ee2..b40012a313ca 100644 --- a/sys/dev/uart/uart_dev_imx.c +++ b/sys/dev/uart/uart_dev_imx.c @@ -1,670 +1,670 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 The FreeBSD Foundation * * This software was developed by Oleksandr Rybalko under sponsorship * from the FreeBSD 
Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #define IMX_ENABLE_CLOCKS #endif #ifdef IMX_ENABLE_CLOCKS -#include +#include #endif #include "uart_if.h" #include /* * The hardare FIFOs are 32 bytes. We want an interrupt when there are 24 bytes * available to read or space for 24 more bytes to write. While 8 bytes of * slack before over/underrun might seem excessive, the hardware can run at * 5mbps, which means 2uS per char, so at full speed 8 bytes provides only 16uS * to get into the interrupt handler and service the fifo. 
*/ #define IMX_FIFOSZ 32 #define IMX_RXFIFO_LEVEL 24 #define IMX_TXFIFO_LEVEL 24 /* * Low-level UART interface. */ static int imx_uart_probe(struct uart_bas *bas); static void imx_uart_init(struct uart_bas *bas, int, int, int, int); static void imx_uart_term(struct uart_bas *bas); static void imx_uart_putc(struct uart_bas *bas, int); static int imx_uart_rxready(struct uart_bas *bas); static int imx_uart_getc(struct uart_bas *bas, struct mtx *); static struct uart_ops uart_imx_uart_ops = { .probe = imx_uart_probe, .init = imx_uart_init, .term = imx_uart_term, .putc = imx_uart_putc, .rxready = imx_uart_rxready, .getc = imx_uart_getc, }; #if 0 /* Handy when debugging. */ static void dumpregs(struct uart_bas *bas, const char * msg) { if (!bootverbose) return; printf("%s bsh 0x%08lx UCR1 0x%08x UCR2 0x%08x " "UCR3 0x%08x UCR4 0x%08x USR1 0x%08x USR2 0x%08x\n", msg, bas->bsh, GETREG(bas, REG(UCR1)), GETREG(bas, REG(UCR2)), GETREG(bas, REG(UCR3)), GETREG(bas, REG(UCR4)), GETREG(bas, REG(USR1)), GETREG(bas, REG(USR2))); } #endif static int imx_uart_probe(struct uart_bas *bas) { return (0); } static u_int imx_uart_getbaud(struct uart_bas *bas) { uint32_t rate, ubir, ubmr; u_int baud, blo, bhi, i; static const u_int predivs[] = {6, 5, 4, 3, 2, 1, 7, 1}; static const u_int std_rates[] = { 9600, 14400, 19200, 38400, 57600, 115200, 230400, 460800, 921600 }; /* * Get the baud rate the hardware is programmed for, then search the * table of standard baud rates for a number that's within 3% of the * actual rate the hardware is programmed for. It's more comforting to * see that your console is running at 115200 than 114942. Note that * here we cannot make a simplifying assumption that the predivider and * numerator are 1 (like we do when setting the baud rate), because we * don't know what u-boot might have set up. 
*/ i = (GETREG(bas, REG(UFCR)) & IMXUART_UFCR_RFDIV_MASK) >> IMXUART_UFCR_RFDIV_SHIFT; rate = bas->rclk / predivs[i]; ubir = GETREG(bas, REG(UBIR)) + 1; ubmr = GETREG(bas, REG(UBMR)) + 1; baud = ((rate / 16 ) * ubir) / ubmr; blo = (baud * 100) / 103; bhi = (baud * 100) / 97; for (i = 0; i < nitems(std_rates); i++) { rate = std_rates[i]; if (rate >= blo && rate <= bhi) { baud = rate; break; } } return (baud); } static void imx_uart_init(struct uart_bas *bas, int baudrate, int databits, int stopbits, int parity) { uint32_t baseclk, reg; /* Enable the device and the RX/TX channels. */ SET(bas, REG(UCR1), FLD(UCR1, UARTEN)); SET(bas, REG(UCR2), FLD(UCR2, RXEN) | FLD(UCR2, TXEN)); if (databits == 7) DIS(bas, UCR2, WS); else ENA(bas, UCR2, WS); if (stopbits == 2) ENA(bas, UCR2, STPB); else DIS(bas, UCR2, STPB); switch (parity) { case UART_PARITY_ODD: DIS(bas, UCR2, PROE); ENA(bas, UCR2, PREN); break; case UART_PARITY_EVEN: ENA(bas, UCR2, PROE); ENA(bas, UCR2, PREN); break; case UART_PARITY_MARK: case UART_PARITY_SPACE: /* FALLTHROUGH: Hardware doesn't support mark/space. */ case UART_PARITY_NONE: default: DIS(bas, UCR2, PREN); break; } /* * The hardware has an extremely flexible baud clock: it allows setting * both the numerator and denominator of the divider, as well as a * separate pre-divider. We simplify the problem of coming up with a * workable pair of numbers by assuming a pre-divider and numerator of * one because our base clock is so fast we can reach virtually any * reasonable speed with a simple divisor. The numerator value actually * includes the 16x over-sampling (so a value of 16 means divide by 1); * the register value is the numerator-1, so we have a hard-coded 15. * Note that a quirk of the hardware requires that both UBIR and UBMR be * set back to back in order for the change to take effect. 
*/ if ((baudrate > 0) && (bas->rclk != 0)) { baseclk = bas->rclk; reg = GETREG(bas, REG(UFCR)); reg = (reg & ~IMXUART_UFCR_RFDIV_MASK) | IMXUART_UFCR_RFDIV_DIV1; SETREG(bas, REG(UFCR), reg); SETREG(bas, REG(UBIR), 15); SETREG(bas, REG(UBMR), (baseclk / baudrate) - 1); } /* * Program the tx lowater and rx hiwater levels at which fifo-service * interrupts are signaled. The tx value is interpetted as "when there * are only this many bytes remaining" (not "this many free"). */ reg = GETREG(bas, REG(UFCR)); reg &= ~(IMXUART_UFCR_TXTL_MASK | IMXUART_UFCR_RXTL_MASK); reg |= (IMX_FIFOSZ - IMX_TXFIFO_LEVEL) << IMXUART_UFCR_TXTL_SHIFT; reg |= IMX_RXFIFO_LEVEL << IMXUART_UFCR_RXTL_SHIFT; SETREG(bas, REG(UFCR), reg); } static void imx_uart_term(struct uart_bas *bas) { } static void imx_uart_putc(struct uart_bas *bas, int c) { while (!(IS(bas, USR1, TRDY))) ; SETREG(bas, REG(UTXD), c); } static int imx_uart_rxready(struct uart_bas *bas) { return ((IS(bas, USR2, RDR)) ? 1 : 0); } static int imx_uart_getc(struct uart_bas *bas, struct mtx *hwmtx) { int c; uart_lock(hwmtx); while (!(IS(bas, USR2, RDR))) ; c = GETREG(bas, REG(URXD)); uart_unlock(hwmtx); #if defined(KDB) if (c & FLD(URXD, BRK)) { if (kdb_break()) return (0); } #endif return (c & 0xff); } /* * High-level UART interface. 
*/ struct imx_uart_softc { struct uart_softc base; }; static int imx_uart_bus_attach(struct uart_softc *); static int imx_uart_bus_detach(struct uart_softc *); static int imx_uart_bus_flush(struct uart_softc *, int); static int imx_uart_bus_getsig(struct uart_softc *); static int imx_uart_bus_ioctl(struct uart_softc *, int, intptr_t); static int imx_uart_bus_ipend(struct uart_softc *); static int imx_uart_bus_param(struct uart_softc *, int, int, int, int); static int imx_uart_bus_probe(struct uart_softc *); static int imx_uart_bus_receive(struct uart_softc *); static int imx_uart_bus_setsig(struct uart_softc *, int); static int imx_uart_bus_transmit(struct uart_softc *); static void imx_uart_bus_grab(struct uart_softc *); static void imx_uart_bus_ungrab(struct uart_softc *); static kobj_method_t imx_uart_methods[] = { KOBJMETHOD(uart_attach, imx_uart_bus_attach), KOBJMETHOD(uart_detach, imx_uart_bus_detach), KOBJMETHOD(uart_flush, imx_uart_bus_flush), KOBJMETHOD(uart_getsig, imx_uart_bus_getsig), KOBJMETHOD(uart_ioctl, imx_uart_bus_ioctl), KOBJMETHOD(uart_ipend, imx_uart_bus_ipend), KOBJMETHOD(uart_param, imx_uart_bus_param), KOBJMETHOD(uart_probe, imx_uart_bus_probe), KOBJMETHOD(uart_receive, imx_uart_bus_receive), KOBJMETHOD(uart_setsig, imx_uart_bus_setsig), KOBJMETHOD(uart_transmit, imx_uart_bus_transmit), KOBJMETHOD(uart_grab, imx_uart_bus_grab), KOBJMETHOD(uart_ungrab, imx_uart_bus_ungrab), { 0, 0 } }; static struct uart_class uart_imx_class = { "imx", imx_uart_methods, sizeof(struct imx_uart_softc), .uc_ops = &uart_imx_uart_ops, .uc_range = 0x100, .uc_rclk = 24000000, /* TODO: get value from CCM */ .uc_rshift = 0 }; static struct ofw_compat_data compat_data[] = { {"fsl,imx6q-uart", (uintptr_t)&uart_imx_class}, {"fsl,imx53-uart", (uintptr_t)&uart_imx_class}, {"fsl,imx51-uart", (uintptr_t)&uart_imx_class}, {"fsl,imx31-uart", (uintptr_t)&uart_imx_class}, {"fsl,imx27-uart", (uintptr_t)&uart_imx_class}, {"fsl,imx25-uart", (uintptr_t)&uart_imx_class}, 
{"fsl,imx21-uart", (uintptr_t)&uart_imx_class}, {NULL, (uintptr_t)NULL}, }; UART_FDT_CLASS_AND_DEVICE(compat_data); #define SIGCHG(c, i, s, d) \ if (c) { \ i |= (i & s) ? s : s | d; \ } else { \ i = (i & s) ? (i & ~s) | d : i; \ } #ifdef IMX_ENABLE_CLOCKS static int imx_uart_setup_clocks(struct uart_softc *sc) { struct uart_bas *bas; clk_t ipgclk, perclk; uint64_t freq; int error; bas = &sc->sc_bas; if (clk_get_by_ofw_name(sc->sc_dev, 0, "ipg", &ipgclk) != 0) return (ENOENT); if (clk_get_by_ofw_name(sc->sc_dev, 0, "per", &perclk) != 0) { return (ENOENT); } error = clk_enable(ipgclk); if (error != 0) { device_printf(sc->sc_dev, "cannot enable ipg clock\n"); return (error); } error = clk_get_freq(perclk, &freq); if (error != 0) { device_printf(sc->sc_dev, "cannot get frequency\n"); return (error); } bas->rclk = (uint32_t)freq; return (0); } #endif static int imx_uart_bus_attach(struct uart_softc *sc) { struct uart_bas *bas; struct uart_devinfo *di; bas = &sc->sc_bas; #ifdef IMX_ENABLE_CLOCKS int error = imx_uart_setup_clocks(sc); if (error) return (error); #else bas->rclk = imx_ccm_uart_hz(); #endif if (sc->sc_sysdev != NULL) { di = sc->sc_sysdev; imx_uart_init(bas, di->baudrate, di->databits, di->stopbits, di->parity); } else { imx_uart_init(bas, 115200, 8, 1, 0); } (void)imx_uart_bus_getsig(sc); /* Clear all pending interrupts. 
*/ SETREG(bas, REG(USR1), 0xffff); SETREG(bas, REG(USR2), 0xffff); DIS(bas, UCR4, DREN); ENA(bas, UCR1, RRDYEN); DIS(bas, UCR1, IDEN); DIS(bas, UCR3, RXDSEN); ENA(bas, UCR2, ATEN); DIS(bas, UCR1, TXMPTYEN); DIS(bas, UCR1, TRDYEN); DIS(bas, UCR4, TCEN); DIS(bas, UCR4, OREN); ENA(bas, UCR4, BKEN); DIS(bas, UCR4, WKEN); DIS(bas, UCR1, ADEN); DIS(bas, UCR3, ACIEN); DIS(bas, UCR2, ESCI); DIS(bas, UCR4, ENIRI); DIS(bas, UCR3, AIRINTEN); DIS(bas, UCR3, AWAKEN); DIS(bas, UCR3, FRAERREN); DIS(bas, UCR3, PARERREN); DIS(bas, UCR1, RTSDEN); DIS(bas, UCR2, RTSEN); DIS(bas, UCR3, DTREN); DIS(bas, UCR3, RI); DIS(bas, UCR3, DCD); DIS(bas, UCR3, DTRDEN); ENA(bas, UCR2, IRTS); ENA(bas, UCR3, RXDMUXSEL); return (0); } static int imx_uart_bus_detach(struct uart_softc *sc) { SETREG(&sc->sc_bas, REG(UCR4), 0); return (0); } static int imx_uart_bus_flush(struct uart_softc *sc, int what) { /* TODO */ return (0); } static int imx_uart_bus_getsig(struct uart_softc *sc) { uint32_t new, old, sig; uint8_t bes; do { old = sc->sc_hwsig; sig = old; uart_lock(sc->sc_hwmtx); bes = GETREG(&sc->sc_bas, REG(USR2)); uart_unlock(sc->sc_hwmtx); /* XXX: chip can show delta */ SIGCHG(bes & FLD(USR2, DCDIN), sig, SER_DCD, SER_DDCD); new = sig & ~SER_MASK_DELTA; } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); return (sig); } static int imx_uart_bus_ioctl(struct uart_softc *sc, int request, intptr_t data) { struct uart_bas *bas; int error; bas = &sc->sc_bas; error = 0; uart_lock(sc->sc_hwmtx); switch (request) { case UART_IOCTL_BREAK: /* TODO */ break; case UART_IOCTL_BAUD: *(u_int*)data = imx_uart_getbaud(bas); break; default: error = EINVAL; break; } uart_unlock(sc->sc_hwmtx); return (error); } static int imx_uart_bus_ipend(struct uart_softc *sc) { struct uart_bas *bas; int ipend; uint32_t usr1, usr2; uint32_t ucr1, ucr2, ucr4; bas = &sc->sc_bas; ipend = 0; uart_lock(sc->sc_hwmtx); /* Read pending interrupts */ usr1 = GETREG(bas, REG(USR1)); usr2 = GETREG(bas, REG(USR2)); /* ACK interrupts */ 
SETREG(bas, REG(USR1), usr1); SETREG(bas, REG(USR2), usr2); ucr1 = GETREG(bas, REG(UCR1)); ucr2 = GETREG(bas, REG(UCR2)); ucr4 = GETREG(bas, REG(UCR4)); /* If we have reached tx low-water, we can tx some more now. */ if ((usr1 & FLD(USR1, TRDY)) && (ucr1 & FLD(UCR1, TRDYEN))) { DIS(bas, UCR1, TRDYEN); ipend |= SER_INT_TXIDLE; } /* * If we have reached the rx high-water, or if there are bytes in the rx * fifo and no new data has arrived for 8 character periods (aging * timer), we have input data to process. */ if (((usr1 & FLD(USR1, RRDY)) && (ucr1 & FLD(UCR1, RRDYEN))) || ((usr1 & FLD(USR1, AGTIM)) && (ucr2 & FLD(UCR2, ATEN)))) { DIS(bas, UCR1, RRDYEN); DIS(bas, UCR2, ATEN); ipend |= SER_INT_RXREADY; } /* A break can come in at any time, it never gets disabled. */ if ((usr2 & FLD(USR2, BRCD)) && (ucr4 & FLD(UCR4, BKEN))) ipend |= SER_INT_BREAK; uart_unlock(sc->sc_hwmtx); return (ipend); } static int imx_uart_bus_param(struct uart_softc *sc, int baudrate, int databits, int stopbits, int parity) { uart_lock(sc->sc_hwmtx); imx_uart_init(&sc->sc_bas, baudrate, databits, stopbits, parity); uart_unlock(sc->sc_hwmtx); return (0); } static int imx_uart_bus_probe(struct uart_softc *sc) { int error; error = imx_uart_probe(&sc->sc_bas); if (error) return (error); /* * On input we can read up to the full fifo size at once. On output, we * want to write only as much as the programmed tx low water level, * because that's all we can be certain we have room for in the fifo * when we get a tx-ready interrupt. */ sc->sc_rxfifosz = IMX_FIFOSZ; sc->sc_txfifosz = IMX_TXFIFO_LEVEL; device_set_desc(sc->sc_dev, "Freescale i.MX UART"); return (0); } static int imx_uart_bus_receive(struct uart_softc *sc) { struct uart_bas *bas; int xc, out; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); /* * Empty the rx fifo. 
We get the RRDY interrupt when IMX_RXFIFO_LEVEL * (the rx high-water level) is reached, but we set sc_rxfifosz to the * full hardware fifo size, so we can safely process however much is * there, not just the highwater size. */ while (IS(bas, USR2, RDR)) { if (uart_rx_full(sc)) { /* No space left in input buffer */ sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN; break; } xc = GETREG(bas, REG(URXD)); out = xc & 0x000000ff; if (xc & FLD(URXD, FRMERR)) out |= UART_STAT_FRAMERR; if (xc & FLD(URXD, PRERR)) out |= UART_STAT_PARERR; if (xc & FLD(URXD, OVRRUN)) out |= UART_STAT_OVERRUN; if (xc & FLD(URXD, BRK)) out |= UART_STAT_BREAK; uart_rx_put(sc, out); } ENA(bas, UCR1, RRDYEN); ENA(bas, UCR2, ATEN); uart_unlock(sc->sc_hwmtx); return (0); } static int imx_uart_bus_setsig(struct uart_softc *sc, int sig) { return (0); } static int imx_uart_bus_transmit(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; int i; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); /* * Fill the tx fifo. The uart core puts at most IMX_TXFIFO_LEVEL bytes * into the txbuf (because that's what sc_txfifosz is set to), and * because we got the TRDY (low-water reached) interrupt we know at * least that much space is available in the fifo. 
*/ for (i = 0; i < sc->sc_txdatasz; i++) { SETREG(bas, REG(UTXD), sc->sc_txbuf[i] & 0xff); } sc->sc_txbusy = 1; ENA(bas, UCR1, TRDYEN); uart_unlock(sc->sc_hwmtx); return (0); } static void imx_uart_bus_grab(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); DIS(bas, UCR1, RRDYEN); DIS(bas, UCR2, ATEN); uart_unlock(sc->sc_hwmtx); } static void imx_uart_bus_ungrab(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); ENA(bas, UCR1, RRDYEN); ENA(bas, UCR2, ATEN); uart_unlock(sc->sc_hwmtx); } diff --git a/sys/dev/uart/uart_dev_snps.c b/sys/dev/uart/uart_dev_snps.c index 6dd071f95fe1..fb5894344927 100644 --- a/sys/dev/uart/uart_dev_snps.c +++ b/sys/dev/uart/uart_dev_snps.c @@ -1,285 +1,285 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include "uart_if.h" struct snps_softc { struct ns8250_softc ns8250; clk_t baudclk; clk_t apb_pclk; hwreset_t reset; }; /* * To use early printf on 64 bits Allwinner SoC, add to kernel config * options SOCDEV_PA=0x0 * options SOCDEV_VA=0x40000000 * options EARLY_PRINTF * * To use early printf on 32 bits Allwinner SoC, add to kernel config * options SOCDEV_PA=0x01C00000 * options SOCDEV_VA=0x10000000 * options EARLY_PRINTF * * remove the if 0 */ #if 0 #ifdef EARLY_PRINTF static void uart_snps_early_putc(int c) { volatile uint32_t *stat; volatile uint32_t *tx; #ifdef ALLWINNER_64 stat = (uint32_t *) (SOCDEV_VA + 0x1C2807C); tx = (uint32_t *) (SOCDEV_VA + 0x1C28000); #endif #ifdef ALLWINNER_32 stat = (uint32_t *) (SOCDEV_VA + 0x2807C); tx = (uint32_t *) (SOCDEV_VA + 0x28000); #endif while ((*stat & (1 << 2)) == 0) continue; *tx = c; } early_putc_t *early_putc = uart_snps_early_putc; #endif /* EARLY_PRINTF */ #endif static kobj_method_t snps_methods[] = { KOBJMETHOD(uart_probe, ns8250_bus_probe), KOBJMETHOD(uart_attach, ns8250_bus_attach), KOBJMETHOD(uart_detach, ns8250_bus_detach), KOBJMETHOD(uart_flush, ns8250_bus_flush), KOBJMETHOD(uart_getsig, ns8250_bus_getsig), KOBJMETHOD(uart_ioctl, ns8250_bus_ioctl), KOBJMETHOD(uart_ipend, ns8250_bus_ipend), KOBJMETHOD(uart_param, ns8250_bus_param), KOBJMETHOD(uart_receive, ns8250_bus_receive), 
KOBJMETHOD(uart_setsig, ns8250_bus_setsig), KOBJMETHOD(uart_transmit, ns8250_bus_transmit), KOBJMETHOD(uart_grab, ns8250_bus_grab), KOBJMETHOD(uart_ungrab, ns8250_bus_ungrab), KOBJMETHOD_END }; struct uart_class uart_snps_class = { "snps", snps_methods, sizeof(struct snps_softc), .uc_ops = &uart_ns8250_ops, .uc_range = 8, .uc_rclk = 0, }; static struct ofw_compat_data compat_data[] = { { "snps,dw-apb-uart", (uintptr_t)&uart_snps_class }, { "marvell,armada-38x-uart", (uintptr_t)&uart_snps_class }, { NULL, (uintptr_t)NULL } }; UART_FDT_CLASS(compat_data); static int snps_get_clocks(device_t dev, clk_t *baudclk, clk_t *apb_pclk) { *baudclk = NULL; *apb_pclk = NULL; /* Baud clock is either named "baudclk", or there is a single * unnamed clock. */ if (clk_get_by_ofw_name(dev, 0, "baudclk", baudclk) != 0 && clk_get_by_ofw_index(dev, 0, 0, baudclk) != 0) return (ENOENT); /* APB peripheral clock is optional */ (void)clk_get_by_ofw_name(dev, 0, "apb_pclk", apb_pclk); return (0); } static int snps_probe(device_t dev) { struct snps_softc *sc; struct uart_class *uart_class; phandle_t node; uint32_t shift, iowidth, clock; uint64_t freq; int error; clk_t baudclk, apb_pclk; hwreset_t reset; if (!ofw_bus_status_okay(dev)) return (ENXIO); uart_class = (struct uart_class *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (uart_class == NULL) return (ENXIO); freq = 0; sc = device_get_softc(dev); sc->ns8250.base.sc_class = uart_class; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "reg-shift", &shift, sizeof(shift)) <= 0) shift = 0; if (OF_getencprop(node, "reg-io-width", &iowidth, sizeof(iowidth)) <= 0) iowidth = 1; if (OF_getencprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0) clock = 0; if (hwreset_get_by_ofw_idx(dev, 0, 0, &reset) == 0) { error = hwreset_deassert(reset); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); return (error); } } if (snps_get_clocks(dev, &baudclk, &apb_pclk) == 0) { error = clk_enable(baudclk); if (error != 0) { 
device_printf(dev, "cannot enable baud clock\n"); return (error); } if (apb_pclk != NULL) { error = clk_enable(apb_pclk); if (error != 0) { device_printf(dev, "cannot enable peripheral clock\n"); return (error); } } if (clock == 0) { error = clk_get_freq(baudclk, &freq); if (error != 0) { device_printf(dev, "cannot get frequency\n"); return (error); } clock = (uint32_t)freq; } } if (bootverbose && clock == 0) device_printf(dev, "could not determine frequency\n"); error = uart_bus_probe(dev, (int)shift, (int)iowidth, (int)clock, 0, 0, UART_F_BUSY_DETECT); if (error > 0) return (error); /* XXX uart_bus_probe has changed the softc, so refresh it */ sc = device_get_softc(dev); /* Store clock and reset handles for detach */ sc->baudclk = baudclk; sc->apb_pclk = apb_pclk; sc->reset = reset; return (BUS_PROBE_VENDOR); } static int snps_detach(device_t dev) { struct snps_softc *sc; clk_t baudclk, apb_pclk; hwreset_t reset; int error; sc = device_get_softc(dev); baudclk = sc->baudclk; apb_pclk = sc->apb_pclk; reset = sc->reset; error = uart_bus_detach(dev); if (error != 0) return (error); if (reset != NULL) { error = hwreset_assert(reset); if (error != 0) { device_printf(dev, "cannot assert reset\n"); return (error); } hwreset_release(reset); } if (apb_pclk != NULL) { error = clk_release(apb_pclk); if (error != 0) { device_printf(dev, "cannot release peripheral clock\n"); return (error); } } if (baudclk != NULL) { error = clk_release(baudclk); if (error != 0) { device_printf(dev, "cannot release baud clock\n"); return (error); } } return (0); } static device_method_t snps_bus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, snps_probe), DEVMETHOD(device_attach, uart_bus_attach), DEVMETHOD(device_detach, snps_detach), DEVMETHOD_END }; static driver_t snps_uart_driver = { uart_driver_name, snps_bus_methods, sizeof(struct snps_softc) }; DRIVER_MODULE(uart_snps, simplebus, snps_uart_driver, 0, 0); diff --git a/sys/dev/usb/controller/dwc3/aw_dwc3.c 
b/sys/dev/usb/controller/dwc3/aw_dwc3.c index 5b4d5291c28f..7f2869933ee5 100644 --- a/sys/dev/usb/controller/dwc3/aw_dwc3.c +++ b/sys/dev/usb/controller/dwc3/aw_dwc3.c @@ -1,141 +1,141 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Rockchip DWC3 glue */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include static struct ofw_compat_data compat_data[] = { { "allwinner,sun50i-h6-dwc3", 1 }, { NULL, 0 } }; struct aw_dwc3_softc { struct simplebus_softc sc; device_t dev; clk_t clk_bus; hwreset_t rst_bus; }; static int aw_dwc3_probe(device_t dev) { phandle_t node; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* Binding says that we need a child node for the actual dwc3 controller */ node = ofw_bus_get_node(dev); if (OF_child(node) <= 0) return (ENXIO); device_set_desc(dev, "Allwinner H6 DWC3"); return (BUS_PROBE_DEFAULT); } static int aw_dwc3_attach(device_t dev) { struct aw_dwc3_softc *sc; device_t cdev; phandle_t node, child; int err; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); /* Enable the clocks */ if (clk_get_by_ofw_name(dev, 0, "bus", &sc->clk_bus) != 0) { device_printf(dev, "Cannot get bus clock\n"); return (ENXIO); } err = clk_enable(sc->clk_bus); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_bus)); return (ENXIO); } /* Put module out of reset */ if (hwreset_get_by_ofw_name(dev, node, "bus", &sc->rst_bus) == 0) { if (hwreset_deassert(sc->rst_bus) != 0) { device_printf(dev, "Cannot deassert reset\n"); return (ENXIO); } } simplebus_init(dev, node); if (simplebus_fill_ranges(node, &sc->sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static device_method_t aw_dwc3_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_dwc3_probe), DEVMETHOD(device_attach, aw_dwc3_attach), DEVMETHOD_END }; 
DEFINE_CLASS_1(aw_dwc3, aw_dwc3_driver, aw_dwc3_methods, sizeof(struct aw_dwc3_softc), simplebus_driver); DRIVER_MODULE(aw_dwc3, simplebus, aw_dwc3_driver, 0, 0); diff --git a/sys/dev/usb/controller/dwc3/dwc3.c b/sys/dev/usb/controller/dwc3/dwc3.c index d97b0b11d44c..e0ad19fb835f 100644 --- a/sys/dev/usb/controller/dwc3/dwc3.c +++ b/sys/dev/usb/controller/dwc3/dwc3.c @@ -1,620 +1,620 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * Copyright (c) 2021-2022 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_platform.h" #include "opt_acpi.h" #include #include #include #include #include #include #include #include #ifdef FDT #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include #include -#include +#include #include #endif #ifdef DEV_ACPI #include #include #include #endif struct snps_dwc3_softc { struct xhci_softc sc; device_t dev; struct resource * mem_res; bus_space_tag_t bst; bus_space_handle_t bsh; uint32_t snpsid; uint32_t snpsversion; uint32_t snpsrevision; uint32_t snpsversion_type; #ifdef FDT clk_t clk_ref; clk_t clk_suspend; clk_t clk_bus; #endif }; #define DWC3_WRITE(_sc, _off, _val) \ bus_space_write_4(_sc->bst, _sc->bsh, _off, _val) #define DWC3_READ(_sc, _off) \ bus_space_read_4(_sc->bst, _sc->bsh, _off) #define IS_DMA_32B 1 static void xhci_interrupt_poll(void *_sc) { struct xhci_softc *sc = _sc; USB_BUS_UNLOCK(&sc->sc_bus); xhci_interrupt(sc); USB_BUS_LOCK(&sc->sc_bus); usb_callout_reset(&sc->sc_callout, 1, (void *)&xhci_interrupt_poll, sc); } static int snps_dwc3_attach_xhci(device_t dev) { struct snps_dwc3_softc *snps_sc = device_get_softc(dev); struct xhci_softc *sc = &snps_sc->sc; int err = 0, rid = 0; sc->sc_io_res = snps_sc->mem_res; sc->sc_io_tag = snps_sc->bst; sc->sc_io_hdl = snps_sc->bsh; sc->sc_io_size = rman_get_size(snps_sc->mem_res); sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq_res == NULL) { device_printf(dev, "Failed to allocate IRQ\n"); return (ENXIO); } sc->sc_bus.bdev = device_add_child(dev, "usbus", -1); if (sc->sc_bus.bdev == NULL) { device_printf(dev, "Failed to add USB device\n"); return (ENXIO); } device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus); sprintf(sc->sc_vendor, "Synopsys"); device_set_desc(sc->sc_bus.bdev, "Synopsys"); if (xhci_use_polling() == 0) { err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, 
(driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl); if (err != 0) { device_printf(dev, "Failed to setup IRQ, %d\n", err); sc->sc_intr_hdl = NULL; return (err); } } err = xhci_init(sc, dev, IS_DMA_32B); if (err != 0) { device_printf(dev, "Failed to init XHCI, with error %d\n", err); return (ENXIO); } usb_callout_init_mtx(&sc->sc_callout, &sc->sc_bus.bus_mtx, 0); if (xhci_use_polling() != 0) { device_printf(dev, "Interrupt polling at %dHz\n", hz); USB_BUS_LOCK(&sc->sc_bus); xhci_interrupt_poll(sc); USB_BUS_UNLOCK(&sc->sc_bus); } err = xhci_start_controller(sc); if (err != 0) { device_printf(dev, "Failed to start XHCI controller, with error %d\n", err); return (ENXIO); } device_printf(sc->sc_bus.bdev, "trying to attach\n"); err = device_probe_and_attach(sc->sc_bus.bdev); if (err != 0) { device_printf(dev, "Failed to initialize USB, with error %d\n", err); return (ENXIO); } return (0); } #ifdef DWC3_DEBUG static void snsp_dwc3_dump_regs(struct snps_dwc3_softc *sc, const char *msg) { struct xhci_softc *xsc; uint32_t reg; if (!bootverbose) return; device_printf(sc->dev, "%s: %s:\n", __func__, msg ? 
msg : ""); reg = DWC3_READ(sc, DWC3_GCTL); device_printf(sc->dev, "GCTL: %#012x\n", reg); reg = DWC3_READ(sc, DWC3_GUCTL); device_printf(sc->dev, "GUCTL: %#012x\n", reg); reg = DWC3_READ(sc, DWC3_GUCTL1); device_printf(sc->dev, "GUCTL1: %#012x\n", reg); reg = DWC3_READ(sc, DWC3_GUSB2PHYCFG0); device_printf(sc->dev, "GUSB2PHYCFG0: %#012x\n", reg); reg = DWC3_READ(sc, DWC3_GUSB3PIPECTL0); device_printf(sc->dev, "GUSB3PIPECTL0: %#012x\n", reg); reg = DWC3_READ(sc, DWC3_DCFG); device_printf(sc->dev, "DCFG: %#012x\n", reg); xsc = &sc->sc; device_printf(sc->dev, "xhci quirks: %#012x\n", xsc->sc_quirks); } static void snps_dwc3_dump_ctrlparams(struct snps_dwc3_softc *sc) { const bus_size_t offs[] = { DWC3_GHWPARAMS0, DWC3_GHWPARAMS1, DWC3_GHWPARAMS2, DWC3_GHWPARAMS3, DWC3_GHWPARAMS4, DWC3_GHWPARAMS5, DWC3_GHWPARAMS6, DWC3_GHWPARAMS7, DWC3_GHWPARAMS8, }; uint32_t reg; int i; for (i = 0; i < nitems(offs); i++) { reg = DWC3_READ(sc, offs[i]); if (bootverbose) device_printf(sc->dev, "hwparams[%d]: %#012x\n", i, reg); } } #endif static void snps_dwc3_reset(struct snps_dwc3_softc *sc) { uint32_t gctl, ghwp0, phy2, phy3; ghwp0 = DWC3_READ(sc, DWC3_GHWPARAMS0); gctl = DWC3_READ(sc, DWC3_GCTL); gctl |= DWC3_GCTL_CORESOFTRESET; DWC3_WRITE(sc, DWC3_GCTL, gctl); phy2 = DWC3_READ(sc, DWC3_GUSB2PHYCFG0); phy2 |= DWC3_GUSB2PHYCFG0_PHYSOFTRST; if ((ghwp0 & DWC3_GHWPARAMS0_MODE_MASK) == DWC3_GHWPARAMS0_MODE_DUALROLEDEVICE) phy2 &= ~DWC3_GUSB2PHYCFG0_SUSPENDUSB20; DWC3_WRITE(sc, DWC3_GUSB2PHYCFG0, phy2); phy3 = DWC3_READ(sc, DWC3_GUSB3PIPECTL0); phy3 |= DWC3_GUSB3PIPECTL0_PHYSOFTRST; if ((ghwp0 & DWC3_GHWPARAMS0_MODE_MASK) == DWC3_GHWPARAMS0_MODE_DUALROLEDEVICE) phy3 &= ~DWC3_GUSB3PIPECTL0_SUSPENDUSB3; DWC3_WRITE(sc, DWC3_GUSB3PIPECTL0, phy3); DELAY(1000); phy2 &= ~DWC3_GUSB2PHYCFG0_PHYSOFTRST; DWC3_WRITE(sc, DWC3_GUSB2PHYCFG0, phy2); phy3 &= ~DWC3_GUSB3PIPECTL0_PHYSOFTRST; DWC3_WRITE(sc, DWC3_GUSB3PIPECTL0, phy3); gctl &= ~DWC3_GCTL_CORESOFTRESET; DWC3_WRITE(sc, DWC3_GCTL, gctl); } static 
void snps_dwc3_configure_host(struct snps_dwc3_softc *sc) { uint32_t reg; reg = DWC3_READ(sc, DWC3_GCTL); reg &= ~DWC3_GCTL_PRTCAPDIR_MASK; reg |= DWC3_GCTL_PRTCAPDIR_HOST; DWC3_WRITE(sc, DWC3_GCTL, reg); /* * Enable the Host IN Auto Retry feature, making the * host respond with a non-terminating retry ACK. * XXX If we ever support more than host mode this needs a dr_mode check. */ reg = DWC3_READ(sc, DWC3_GUCTL); reg |= DWC3_GUCTL_HOST_AUTO_RETRY; DWC3_WRITE(sc, DWC3_GUCTL, reg); } #ifdef FDT static void snps_dwc3_configure_phy(struct snps_dwc3_softc *sc, phandle_t node) { char *phy_type; uint32_t reg; int nphy_types; phy_type = NULL; nphy_types = OF_getprop_alloc(node, "phy_type", (void **)&phy_type); if (nphy_types <= 0) return; reg = DWC3_READ(sc, DWC3_GUSB2PHYCFG0); if (strncmp(phy_type, "utmi_wide", 9) == 0) { reg &= ~(DWC3_GUSB2PHYCFG0_PHYIF | DWC3_GUSB2PHYCFG0_USBTRDTIM(0xf)); reg |= DWC3_GUSB2PHYCFG0_PHYIF | DWC3_GUSB2PHYCFG0_USBTRDTIM(DWC3_GUSB2PHYCFG0_USBTRDTIM_16BITS); } else { reg &= ~(DWC3_GUSB2PHYCFG0_PHYIF | DWC3_GUSB2PHYCFG0_USBTRDTIM(0xf)); reg |= DWC3_GUSB2PHYCFG0_PHYIF | DWC3_GUSB2PHYCFG0_USBTRDTIM(DWC3_GUSB2PHYCFG0_USBTRDTIM_8BITS); } DWC3_WRITE(sc, DWC3_GUSB2PHYCFG0, reg); OF_prop_free(phy_type); } #endif static void snps_dwc3_do_quirks(struct snps_dwc3_softc *sc) { struct xhci_softc *xsc; uint32_t ghwp0, reg; ghwp0 = DWC3_READ(sc, DWC3_GHWPARAMS0); reg = DWC3_READ(sc, DWC3_GUSB2PHYCFG0); if (device_has_property(sc->dev, "snps,dis-u2-freeclk-exists-quirk")) reg &= ~DWC3_GUSB2PHYCFG0_U2_FREECLK_EXISTS; else reg |= DWC3_GUSB2PHYCFG0_U2_FREECLK_EXISTS; if (device_has_property(sc->dev, "snps,dis_u2_susphy_quirk")) reg &= ~DWC3_GUSB2PHYCFG0_SUSPENDUSB20; else if ((ghwp0 & DWC3_GHWPARAMS0_MODE_MASK) == DWC3_GHWPARAMS0_MODE_DUALROLEDEVICE) reg |= DWC3_GUSB2PHYCFG0_SUSPENDUSB20; if (device_has_property(sc->dev, "snps,dis_enblslpm_quirk")) reg &= ~DWC3_GUSB2PHYCFG0_ENBLSLPM; else reg |= DWC3_GUSB2PHYCFG0_ENBLSLPM; DWC3_WRITE(sc, DWC3_GUSB2PHYCFG0, 
reg); reg = DWC3_READ(sc, DWC3_GUCTL1); if (device_has_property(sc->dev, "snps,dis-tx-ipgap-linecheck-quirk")) reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; DWC3_WRITE(sc, DWC3_GUCTL1, reg); reg = DWC3_READ(sc, DWC3_GUSB3PIPECTL0); if (device_has_property(sc->dev, "snps,dis-del-phy-power-chg-quirk")) reg &= ~DWC3_GUSB3PIPECTL0_DELAYP1TRANS; if (device_has_property(sc->dev, "snps,dis_rxdet_inp3_quirk")) reg |= DWC3_GUSB3PIPECTL0_DISRXDETINP3; if (device_has_property(sc->dev, "snps,dis_u3_susphy_quirk")) reg &= ~DWC3_GUSB3PIPECTL0_SUSPENDUSB3; else if ((ghwp0 & DWC3_GHWPARAMS0_MODE_MASK) == DWC3_GHWPARAMS0_MODE_DUALROLEDEVICE) reg |= DWC3_GUSB3PIPECTL0_SUSPENDUSB3; DWC3_WRITE(sc, DWC3_GUSB3PIPECTL0, reg); /* Port Disable does not work on <= 3.00a. Disable PORT_PED. */ if ((sc->snpsid & 0xffff) <= 0x300a) { xsc = &sc->sc; xsc->sc_quirks |= XHCI_QUIRK_DISABLE_PORT_PED; } } static int snps_dwc3_probe_common(device_t dev) { char dr_mode[16] = { 0 }; ssize_t s; s = device_get_property(dev, "dr_mode", dr_mode, sizeof(dr_mode), DEVICE_PROP_BUFFER); if (s == -1) { device_printf(dev, "Cannot determine dr_mode\n"); return (ENXIO); } if (strcmp(dr_mode, "host") != 0) { device_printf(dev, "Found dr_mode '%s' but only 'host' supported. 
s=%zd\n", dr_mode, s); return (ENXIO); } device_set_desc(dev, "Synopsys Designware DWC3"); return (BUS_PROBE_DEFAULT); } static int snps_dwc3_common_attach(device_t dev, bool is_fdt) { struct snps_dwc3_softc *sc; #ifdef FDT phandle_t node; phy_t usb2_phy, usb3_phy; uint32_t reg; #endif int error, rid; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Failed to map memory\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); sc->snpsid = DWC3_READ(sc, DWC3_GSNPSID); sc->snpsversion = DWC3_VERSION(sc->snpsid); sc->snpsrevision = DWC3_REVISION(sc->snpsid); if (sc->snpsversion == DWC3_1_IP_ID || sc->snpsversion == DWC3_2_IP_ID) { sc->snpsrevision = DWC3_READ(sc, DWC3_1_VER_NUMBER); sc->snpsversion_type = DWC3_READ(sc, DWC3_1_VER_TYPE); } if (bootverbose) { switch (sc->snpsversion) { case DWC3_IP_ID: device_printf(sc->dev, "SNPS Version: DWC3 (%x %x)\n", sc->snpsversion, sc->snpsrevision); break; case DWC3_1_IP_ID: device_printf(sc->dev, "SNPS Version: DWC3.1 (%x %x %x)\n", sc->snpsversion, sc->snpsrevision, sc->snpsversion_type); break; case DWC3_2_IP_ID: device_printf(sc->dev, "SNPS Version: DWC3.2 (%x %x %x)\n", sc->snpsversion, sc->snpsrevision, sc->snpsversion_type); break; } } #ifdef DWC3_DEBUG snps_dwc3_dump_ctrlparams(sc); #endif #ifdef FDT if (!is_fdt) goto skip_phys; node = ofw_bus_get_node(dev); /* Get the clocks if any */ if (ofw_bus_is_compatible(dev, "rockchip,rk3328-dwc3") == 1 || ofw_bus_is_compatible(dev, "rockchip,rk3568-dwc3") == 1) { if (clk_get_by_ofw_name(dev, node, "ref_clk", &sc->clk_ref) != 0) device_printf(dev, "Cannot get ref_clk\n"); if (clk_get_by_ofw_name(dev, node, "suspend_clk", &sc->clk_suspend) != 0) device_printf(dev, "Cannot get suspend_clk\n"); if (clk_get_by_ofw_name(dev, node, "bus_clk", &sc->clk_bus) != 0) device_printf(dev, "Cannot get bus_clk\n"); } if 
(sc->clk_ref != NULL) { if (clk_enable(sc->clk_ref) != 0) device_printf(dev, "Cannot enable ref_clk\n"); } if (sc->clk_suspend != NULL) { if (clk_enable(sc->clk_suspend) != 0) device_printf(dev, "Cannot enable suspend_clk\n"); } if (sc->clk_bus != NULL) { if (clk_enable(sc->clk_bus) != 0) device_printf(dev, "Cannot enable bus_clk\n"); } /* Get the phys */ usb2_phy = usb3_phy = NULL; error = phy_get_by_ofw_name(dev, node, "usb2-phy", &usb2_phy); if (error == 0 && usb2_phy != NULL) phy_enable(usb2_phy); error = phy_get_by_ofw_name(dev, node, "usb3-phy", &usb3_phy); if (error == 0 && usb3_phy != NULL) phy_enable(usb3_phy); if (sc->snpsversion == DWC3_IP_ID) { if (sc->snpsrevision >= 0x290A) { uint32_t hwparams3; hwparams3 = DWC3_READ(sc, DWC3_GHWPARAMS3); if (DWC3_HWPARAMS3_SSPHY(hwparams3) == DWC3_HWPARAMS3_SSPHY_DISABLE) { reg = DWC3_READ(sc, DWC3_GUCTL1); if (bootverbose) device_printf(dev, "Forcing USB2 clock only\n"); reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK; DWC3_WRITE(sc, DWC3_GUCTL1, reg); } } } snps_dwc3_configure_phy(sc, node); skip_phys: #endif snps_dwc3_reset(sc); snps_dwc3_configure_host(sc); snps_dwc3_do_quirks(sc); #ifdef DWC3_DEBUG snsp_dwc3_dump_regs(sc, "Pre XHCI init"); #endif error = snps_dwc3_attach_xhci(dev); #ifdef DWC3_DEBUG snsp_dwc3_dump_regs(sc, "Post XHCI init"); #endif #ifdef FDT if (error) { if (sc->clk_ref != NULL) clk_disable(sc->clk_ref); if (sc->clk_suspend != NULL) clk_disable(sc->clk_suspend); if (sc->clk_bus != NULL) clk_disable(sc->clk_bus); } #endif return (error); } #ifdef FDT static struct ofw_compat_data compat_data[] = { { "snps,dwc3", 1 }, { NULL, 0 } }; static int snps_dwc3_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (snps_dwc3_probe_common(dev)); } static int snps_dwc3_fdt_attach(device_t dev) { return (snps_dwc3_common_attach(dev, true)); } static device_method_t snps_dwc3_fdt_methods[] = { /* Device 
interface */ DEVMETHOD(device_probe, snps_dwc3_fdt_probe), DEVMETHOD(device_attach, snps_dwc3_fdt_attach), DEVMETHOD_END }; DEFINE_CLASS_1(snps_dwc3_fdt, snps_dwc3_fdt_driver, snps_dwc3_fdt_methods, sizeof(struct snps_dwc3_softc), generic_xhci_driver); DRIVER_MODULE(snps_dwc3_fdt, simplebus, snps_dwc3_fdt_driver, 0, 0); MODULE_DEPEND(snps_dwc3_fdt, xhci, 1, 1, 1); #endif #ifdef DEV_ACPI static char *dwc3_acpi_ids[] = { "808622B7", /* This was an Intel PCI Vendor/Device ID used. */ "PNP0D10", /* The generic XHCI PNP ID needing extra probe checks. */ NULL }; static int snps_dwc3_acpi_probe(device_t dev) { char *match; int error; if (acpi_disabled("snps_dwc3")) return (ENXIO); error = ACPI_ID_PROBE(device_get_parent(dev), dev, dwc3_acpi_ids, &match); if (error > 0) return (ENXIO); /* * If we found the Generic XHCI PNP ID we can only attach if we have * some other means to identify the device as dwc3. */ if (strcmp(match, "PNP0D10") == 0) { /* This is needed in SolidRun's HoneyComb. */ if (device_has_property(dev, "snps,dis_rxdet_inp3_quirk")) goto is_dwc3; return (ENXIO); } is_dwc3: return (snps_dwc3_probe_common(dev)); } static int snps_dwc3_acpi_attach(device_t dev) { return (snps_dwc3_common_attach(dev, false)); } static device_method_t snps_dwc3_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, snps_dwc3_acpi_probe), DEVMETHOD(device_attach, snps_dwc3_acpi_attach), DEVMETHOD_END }; DEFINE_CLASS_1(snps_dwc3_acpi, snps_dwc3_acpi_driver, snps_dwc3_acpi_methods, sizeof(struct snps_dwc3_softc), generic_xhci_driver); DRIVER_MODULE(snps_dwc3_acpi, acpi, snps_dwc3_acpi_driver, 0, 0); MODULE_DEPEND(snps_dwc3_acpi, usb, 1, 1, 1); #endif diff --git a/sys/dev/usb/controller/dwc3/rk_dwc3.c b/sys/dev/usb/controller/dwc3/rk_dwc3.c index f336490c386e..f0cec78a6f22 100644 --- a/sys/dev/usb/controller/dwc3/rk_dwc3.c +++ b/sys/dev/usb/controller/dwc3/rk_dwc3.c @@ -1,198 +1,198 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * * 
Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Rockchip DWC3 glue */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include enum rk_dwc3_type { RK3399 = 1, }; static struct ofw_compat_data compat_data[] = { { "rockchip,rk3399-dwc3", RK3399 }, { NULL, 0 } }; struct rk_dwc3_softc { struct simplebus_softc sc; device_t dev; clk_t clk_ref; clk_t clk_suspend; clk_t clk_bus; clk_t clk_axi_perf; clk_t clk_usb3; clk_t clk_grf; hwreset_t rst_usb3; enum rk_dwc3_type type; }; static int rk_dwc3_probe(device_t dev) { phandle_t node; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* Binding says that we need a child node for the actual dwc3 controller */ node = ofw_bus_get_node(dev); if (OF_child(node) <= 0) return (ENXIO); device_set_desc(dev, "Rockchip RK3399 DWC3"); return (BUS_PROBE_DEFAULT); } static int rk_dwc3_attach(device_t dev) { struct rk_dwc3_softc *sc; device_t cdev; phandle_t node, child; int err; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Mandatory clocks */ if (clk_get_by_ofw_name(dev, 0, "ref_clk", &sc->clk_ref) != 0) { device_printf(dev, "Cannot get ref_clk clock\n"); return (ENXIO); } err = clk_enable(sc->clk_ref); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_ref)); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "suspend_clk", &sc->clk_suspend) != 0) { device_printf(dev, "Cannot get suspend_clk clock\n"); return (ENXIO); } err = clk_enable(sc->clk_suspend); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_suspend)); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "bus_clk", &sc->clk_bus) != 0) { device_printf(dev, "Cannot get bus_clk clock\n"); return (ENXIO); } err = clk_enable(sc->clk_bus); if (err != 0) { device_printf(dev, 
"Could not enable clock %s\n", clk_get_name(sc->clk_bus)); return (ENXIO); } if (clk_get_by_ofw_name(dev, 0, "grf_clk", &sc->clk_grf) == 0) { err = clk_enable(sc->clk_grf); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_grf)); return (ENXIO); } } /* Optional clocks */ if (clk_get_by_ofw_name(dev, 0, "aclk_usb3_rksoc_axi_perf", &sc->clk_axi_perf) == 0) { err = clk_enable(sc->clk_axi_perf); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_axi_perf)); return (ENXIO); } } if (clk_get_by_ofw_name(dev, 0, "aclk_usb3", &sc->clk_usb3) == 0) { err = clk_enable(sc->clk_usb3); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(sc->clk_usb3)); return (ENXIO); } } /* Put module out of reset */ if (hwreset_get_by_ofw_name(dev, node, "usb3-otg", &sc->rst_usb3) == 0) { if (hwreset_deassert(sc->rst_usb3) != 0) { device_printf(dev, "Cannot deassert reset\n"); return (ENXIO); } } simplebus_init(dev, node); if (simplebus_fill_ranges(node, &sc->sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static device_method_t rk_dwc3_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_dwc3_probe), DEVMETHOD(device_attach, rk_dwc3_attach), DEVMETHOD_END }; DEFINE_CLASS_1(rk_dwc3, rk_dwc3_driver, rk_dwc3_methods, sizeof(struct rk_dwc3_softc), simplebus_driver); DRIVER_MODULE(rk_dwc3, simplebus, rk_dwc3_driver, 0, 0); diff --git a/sys/dev/usb/controller/generic_ehci_fdt.c b/sys/dev/usb/controller/generic_ehci_fdt.c index 4dc3758b59f3..8f9558c6636a 100644 --- a/sys/dev/usb/controller/generic_ehci_fdt.c +++ b/sys/dev/usb/controller/generic_ehci_fdt.c @@ -1,239 +1,239 @@ /*- * Copyright (c) 2012 Ganbold Tsagaankhuu * Copyright (c) 2016 The FreeBSD 
Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "generic_ehci.h" struct clk_list { TAILQ_ENTRY(clk_list) next; clk_t clk; }; struct hwrst_list { TAILQ_ENTRY(hwrst_list) next; hwreset_t rst; }; struct phy_list { TAILQ_ENTRY(phy_list) next; phy_t phy; }; struct generic_ehci_fdt_softc { ehci_softc_t ehci_sc; TAILQ_HEAD(, clk_list) clk_list; TAILQ_HEAD(, hwrst_list) rst_list; TAILQ_HEAD(, phy_list) phy_list; }; static device_probe_t generic_ehci_fdt_probe; static device_attach_t generic_ehci_fdt_attach; static device_detach_t generic_ehci_fdt_detach; static int generic_ehci_fdt_probe(device_t self) { if (!ofw_bus_status_okay(self)) return (ENXIO); if (!ofw_bus_is_compatible(self, "generic-ehci")) return (ENXIO); device_set_desc(self, "Generic EHCI Controller"); return (BUS_PROBE_DEFAULT); } static int generic_ehci_fdt_attach(device_t dev) { int err; struct generic_ehci_fdt_softc *sc; struct clk_list *clkp; clk_t clk; struct hwrst_list *rstp; hwreset_t rst; struct phy_list *phyp; phy_t phy; int off; sc = device_get_softc(dev); TAILQ_INIT(&sc->clk_list); /* Enable clock */ for (off = 0; clk_get_by_ofw_index(dev, 0, off, &clk) == 0; off++) { err = clk_enable(clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(clk)); goto error; } clkp = malloc(sizeof(*clkp), M_DEVBUF, M_WAITOK | M_ZERO); clkp->clk = clk; TAILQ_INSERT_TAIL(&sc->clk_list, clkp, next); } /* De-assert reset */ TAILQ_INIT(&sc->rst_list); for (off = 0; hwreset_get_by_ofw_idx(dev, 0, off, &rst) == 0; off++) { err = hwreset_deassert(rst); if (err != 0) { device_printf(dev, "Could not de-assert reset\n"); goto error; } rstp = malloc(sizeof(*rstp), M_DEVBUF, M_WAITOK | M_ZERO); rstp->rst = rst; TAILQ_INSERT_TAIL(&sc->rst_list, rstp, next); } /* Enable USB PHY */ 
TAILQ_INIT(&sc->phy_list); for (off = 0; phy_get_by_ofw_idx(dev, 0, off, &phy) == 0; off++) { err = phy_usb_set_mode(phy, PHY_USB_MODE_HOST); if (err != 0) { device_printf(dev, "Could not set phy to host mode\n"); goto error; } err = phy_enable(phy); if (err != 0) { device_printf(dev, "Could not enable phy\n"); goto error; } phyp = malloc(sizeof(*phyp), M_DEVBUF, M_WAITOK | M_ZERO); phyp->phy = phy; TAILQ_INSERT_TAIL(&sc->phy_list, phyp, next); } err = generic_ehci_attach(dev); if (err != 0) goto error; return (0); error: generic_ehci_fdt_detach(dev); return (err); } static int generic_ehci_fdt_detach(device_t dev) { struct generic_ehci_fdt_softc *sc; struct clk_list *clk, *clk_tmp; struct hwrst_list *rst, *rst_tmp; struct phy_list *phy, *phy_tmp; int err; err = generic_ehci_detach(dev); if (err != 0) return (err); sc = device_get_softc(dev); /* Disable clock */ TAILQ_FOREACH_SAFE(clk, &sc->clk_list, next, clk_tmp) { err = clk_disable(clk->clk); if (err != 0) device_printf(dev, "Could not disable clock %s\n", clk_get_name(clk->clk)); err = clk_release(clk->clk); if (err != 0) device_printf(dev, "Could not release clock %s\n", clk_get_name(clk->clk)); TAILQ_REMOVE(&sc->clk_list, clk, next); free(clk, M_DEVBUF); } /* Assert reset */ TAILQ_FOREACH_SAFE(rst, &sc->rst_list, next, rst_tmp) { hwreset_assert(rst->rst); hwreset_release(rst->rst); TAILQ_REMOVE(&sc->rst_list, rst, next); free(rst, M_DEVBUF); } /* Disable phys */ TAILQ_FOREACH_SAFE(phy, &sc->phy_list, next, phy_tmp) { err = phy_disable(phy->phy); if (err != 0) device_printf(dev, "Could not disable phy\n"); phy_release(phy->phy); TAILQ_REMOVE(&sc->phy_list, phy, next); free(phy, M_DEVBUF); } return (0); } static device_method_t ehci_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, generic_ehci_fdt_probe), DEVMETHOD(device_attach, generic_ehci_fdt_attach), DEVMETHOD(device_detach, generic_ehci_fdt_detach), DEVMETHOD_END }; DEFINE_CLASS_1(ehci, ehci_fdt_driver, ehci_fdt_methods, 
sizeof(ehci_softc_t), generic_ehci_driver); DRIVER_MODULE(generic_ehci, simplebus, ehci_fdt_driver, 0, 0); MODULE_DEPEND(generic_ehci, usb, 1, 1, 1); diff --git a/sys/dev/usb/controller/generic_ohci.c b/sys/dev/usb/controller/generic_ohci.c index 5098a12446b0..efedc92ebfb9 100644 --- a/sys/dev/usb/controller/generic_ohci.c +++ b/sys/dev/usb/controller/generic_ohci.c @@ -1,328 +1,328 @@ /*- * Copyright (c) 2016 Emmanuel Vadot All rights reserved. * Copyright (c) 2006 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Generic OHCI driver based on AT91 OHCI */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include "generic_usb_if.h" struct clk_list { TAILQ_ENTRY(clk_list) next; clk_t clk; }; struct phy_list { TAILQ_ENTRY(phy_list) next; phy_t phy; }; struct hwrst_list { TAILQ_ENTRY(hwrst_list) next; hwreset_t rst; }; struct generic_ohci_softc { ohci_softc_t ohci_sc; TAILQ_HEAD(, clk_list) clk_list; TAILQ_HEAD(, phy_list) phy_list; TAILQ_HEAD(, hwrst_list) rst_list; }; static int generic_ohci_detach(device_t); static int generic_ohci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "generic-ohci")) return (ENXIO); device_set_desc(dev, "Generic OHCI Controller"); return (BUS_PROBE_DEFAULT); } static int generic_ohci_attach(device_t dev) { struct generic_ohci_softc *sc = device_get_softc(dev); int err, rid; int off; struct clk_list *clkp; struct phy_list *phyp; struct hwrst_list *rstp; clk_t clk; phy_t phy; hwreset_t rst; sc->ohci_sc.sc_bus.parent = dev; sc->ohci_sc.sc_bus.devices = sc->ohci_sc.sc_devices; sc->ohci_sc.sc_bus.devices_max = OHCI_MAX_DEVICES; sc->ohci_sc.sc_bus.dma_bits = 32; /* get all DMA memory */ if (usb_bus_mem_alloc_all(&sc->ohci_sc.sc_bus, USB_GET_DMA_TAG(dev), &ohci_iterate_hw_softc)) { return (ENOMEM); } rid = 0; sc->ohci_sc.sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->ohci_sc.sc_io_res == 0) { err = ENOMEM; goto error; } sc->ohci_sc.sc_io_tag = rman_get_bustag(sc->ohci_sc.sc_io_res); sc->ohci_sc.sc_io_hdl = rman_get_bushandle(sc->ohci_sc.sc_io_res); sc->ohci_sc.sc_io_size = rman_get_size(sc->ohci_sc.sc_io_res); rid = 0; sc->ohci_sc.sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->ohci_sc.sc_irq_res == 0) { err = ENXIO; goto error; } sc->ohci_sc.sc_bus.bdev 
= device_add_child(dev, "usbus", -1); if (sc->ohci_sc.sc_bus.bdev == 0) { err = ENXIO; goto error; } device_set_ivars(sc->ohci_sc.sc_bus.bdev, &sc->ohci_sc.sc_bus); strlcpy(sc->ohci_sc.sc_vendor, "Generic", sizeof(sc->ohci_sc.sc_vendor)); err = bus_setup_intr(dev, sc->ohci_sc.sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, (driver_intr_t *)ohci_interrupt, sc, &sc->ohci_sc.sc_intr_hdl); if (err) { sc->ohci_sc.sc_intr_hdl = NULL; goto error; } TAILQ_INIT(&sc->clk_list); /* Enable clock */ for (off = 0; clk_get_by_ofw_index(dev, 0, off, &clk) == 0; off++) { err = clk_enable(clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(clk)); goto error; } clkp = malloc(sizeof(*clkp), M_DEVBUF, M_WAITOK | M_ZERO); clkp->clk = clk; TAILQ_INSERT_TAIL(&sc->clk_list, clkp, next); } /* De-assert reset */ TAILQ_INIT(&sc->rst_list); for (off = 0; hwreset_get_by_ofw_idx(dev, 0, off, &rst) == 0; off++) { err = hwreset_deassert(rst); if (err != 0) { device_printf(dev, "Could not de-assert reset\n"); goto error; } rstp = malloc(sizeof(*rstp), M_DEVBUF, M_WAITOK | M_ZERO); rstp->rst = rst; TAILQ_INSERT_TAIL(&sc->rst_list, rstp, next); } /* Enable phy */ TAILQ_INIT(&sc->phy_list); for (off = 0; phy_get_by_ofw_idx(dev, 0, off, &phy) == 0; off++) { err = phy_usb_set_mode(phy, PHY_USB_MODE_HOST); if (err != 0) { device_printf(dev, "Could not set phy to host mode\n"); goto error; } err = phy_enable(phy); if (err != 0) { device_printf(dev, "Could not enable phy\n"); goto error; } phyp = malloc(sizeof(*phyp), M_DEVBUF, M_WAITOK | M_ZERO); phyp->phy = phy; TAILQ_INSERT_TAIL(&sc->phy_list, phyp, next); } if (GENERIC_USB_INIT(dev) != 0) { err = ENXIO; goto error; } err = ohci_init(&sc->ohci_sc); if (err == 0) err = device_probe_and_attach(sc->ohci_sc.sc_bus.bdev); if (err) goto error; return (0); error: generic_ohci_detach(dev); return (err); } static int generic_ohci_detach(device_t dev) { struct generic_ohci_softc *sc = device_get_softc(dev); int err; struct clk_list 
*clk, *clk_tmp; struct phy_list *phy, *phy_tmp; struct hwrst_list *rst, *rst_tmp; /* during module unload there are lots of children leftover */ device_delete_children(dev); /* * Put the controller into reset, then disable clocks and do * the MI tear down. We have to disable the clocks/hardware * after we do the rest of the teardown. We also disable the * clocks in the opposite order we acquire them, but that * doesn't seem to be absolutely necessary. We free up the * clocks after we disable them, so the system could, in * theory, reuse them. */ bus_space_write_4(sc->ohci_sc.sc_io_tag, sc->ohci_sc.sc_io_hdl, OHCI_CONTROL, 0); if (sc->ohci_sc.sc_irq_res && sc->ohci_sc.sc_intr_hdl) { /* * only call ohci_detach() after ohci_init() */ ohci_detach(&sc->ohci_sc); err = bus_teardown_intr(dev, sc->ohci_sc.sc_irq_res, sc->ohci_sc.sc_intr_hdl); sc->ohci_sc.sc_intr_hdl = NULL; } if (sc->ohci_sc.sc_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ohci_sc.sc_irq_res); sc->ohci_sc.sc_irq_res = NULL; } if (sc->ohci_sc.sc_io_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->ohci_sc.sc_io_res); sc->ohci_sc.sc_io_res = NULL; } usb_bus_mem_free_all(&sc->ohci_sc.sc_bus, &ohci_iterate_hw_softc); /* Disable phy */ TAILQ_FOREACH_SAFE(phy, &sc->phy_list, next, phy_tmp) { err = phy_disable(phy->phy); if (err != 0) device_printf(dev, "Could not disable phy\n"); phy_release(phy->phy); TAILQ_REMOVE(&sc->phy_list, phy, next); free(phy, M_DEVBUF); } /* Assert reset */ TAILQ_FOREACH_SAFE(rst, &sc->rst_list, next, rst_tmp) { hwreset_assert(rst->rst); hwreset_release(rst->rst); TAILQ_REMOVE(&sc->rst_list, rst, next); free(rst, M_DEVBUF); } /* Disable clock */ TAILQ_FOREACH_SAFE(clk, &sc->clk_list, next, clk_tmp) { err = clk_disable(clk->clk); if (err != 0) device_printf(dev, "Could not disable clock %s\n", clk_get_name(clk->clk)); err = clk_release(clk->clk); if (err != 0) device_printf(dev, "Could not release clock %s\n", clk_get_name(clk->clk)); TAILQ_REMOVE(&sc->clk_list, clk, 
next); free(clk, M_DEVBUF); } if (GENERIC_USB_DEINIT(dev) != 0) return (ENXIO); return (0); } static device_method_t generic_ohci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, generic_ohci_probe), DEVMETHOD(device_attach, generic_ohci_attach), DEVMETHOD(device_detach, generic_ohci_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; driver_t generic_ohci_driver = { .name = "ohci", .methods = generic_ohci_methods, .size = sizeof(struct generic_ohci_softc), }; DRIVER_MODULE(ohci, simplebus, generic_ohci_driver, 0, 0); MODULE_DEPEND(ohci, usb, 1, 1, 1); diff --git a/sys/dev/usb/controller/musb_otg_allwinner.c b/sys/dev/usb/controller/musb_otg_allwinner.c index 2949f730cd7d..4e630ac431e8 100644 --- a/sys/dev/usb/controller/musb_otg_allwinner.c +++ b/sys/dev/usb/controller/musb_otg_allwinner.c @@ -1,621 +1,621 @@ /*- * Copyright (c) 2016 Jared McNeill * Copyright (c) 2018 Andrew Turner * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Allwinner USB Dual-Role Device (DRD) controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #ifdef __arm__ #include #include #endif #define DRD_EP_MAX 5 #define DRD_EP_MAX_H3 4 #define MUSB2_REG_AWIN_VEND0 0x0043 #define VEND0_PIO_MODE 0 #if defined(__arm__) #define bs_parent_space(bs) ((bs)->bs_parent) typedef bus_space_tag_t awusb_bs_tag; #elif defined(__aarch64__) #define bs_parent_space(bs) (bs) typedef void * awusb_bs_tag; #endif #define AWUSB_OKAY 0x01 #define AWUSB_NO_CONFDATA 0x02 static struct ofw_compat_data compat_data[] = { { "allwinner,sun4i-a10-musb", AWUSB_OKAY }, { "allwinner,sun6i-a31-musb", AWUSB_OKAY }, { "allwinner,sun8i-a33-musb", AWUSB_OKAY | AWUSB_NO_CONFDATA }, { "allwinner,sun8i-h3-musb", AWUSB_OKAY | AWUSB_NO_CONFDATA }, { NULL, 0 } }; static const struct musb_otg_ep_cfg musbotg_ep_allwinner[] = { { .ep_end = DRD_EP_MAX, .ep_fifosz_shift = 9, .ep_fifosz_reg = MUSB2_VAL_FIFOSZ_512, }, { .ep_end = -1, }, }; static const struct musb_otg_ep_cfg musbotg_ep_allwinner_h3[] = { { .ep_end = DRD_EP_MAX_H3, .ep_fifosz_shift = 9, .ep_fifosz_reg = MUSB2_VAL_FIFOSZ_512, }, { .ep_end = -1, }, }; struct awusbdrd_softc { struct musbotg_softc sc; struct resource *res[2]; clk_t clk; hwreset_t reset; phy_t phy; struct bus_space bs; int flags; }; static struct resource_spec 
awusbdrd_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define REMAPFLAG 0x8000 #define REGDECL(a, b) [(a)] = ((b) | REMAPFLAG) /* Allwinner USB DRD register mappings */ static const uint16_t awusbdrd_regmap[] = { REGDECL(MUSB2_REG_EPFIFO(0), 0x0000), REGDECL(MUSB2_REG_EPFIFO(1), 0x0004), REGDECL(MUSB2_REG_EPFIFO(2), 0x0008), REGDECL(MUSB2_REG_EPFIFO(3), 0x000c), REGDECL(MUSB2_REG_EPFIFO(4), 0x0010), REGDECL(MUSB2_REG_EPFIFO(5), 0x0014), REGDECL(MUSB2_REG_POWER, 0x0040), REGDECL(MUSB2_REG_DEVCTL, 0x0041), REGDECL(MUSB2_REG_EPINDEX, 0x0042), REGDECL(MUSB2_REG_INTTX, 0x0044), REGDECL(MUSB2_REG_INTRX, 0x0046), REGDECL(MUSB2_REG_INTTXE, 0x0048), REGDECL(MUSB2_REG_INTRXE, 0x004a), REGDECL(MUSB2_REG_INTUSB, 0x004c), REGDECL(MUSB2_REG_INTUSBE, 0x0050), REGDECL(MUSB2_REG_FRAME, 0x0054), REGDECL(MUSB2_REG_TESTMODE, 0x007c), REGDECL(MUSB2_REG_TXMAXP, 0x0080), REGDECL(MUSB2_REG_TXCSRL, 0x0082), REGDECL(MUSB2_REG_TXCSRH, 0x0083), REGDECL(MUSB2_REG_RXMAXP, 0x0084), REGDECL(MUSB2_REG_RXCSRL, 0x0086), REGDECL(MUSB2_REG_RXCSRH, 0x0087), REGDECL(MUSB2_REG_RXCOUNT, 0x0088), REGDECL(MUSB2_REG_TXTI, 0x008c), REGDECL(MUSB2_REG_TXNAKLIMIT, 0x008d), REGDECL(MUSB2_REG_RXNAKLIMIT, 0x008f), REGDECL(MUSB2_REG_RXTI, 0x008e), REGDECL(MUSB2_REG_TXFIFOSZ, 0x0090), REGDECL(MUSB2_REG_TXFIFOADD, 0x0092), REGDECL(MUSB2_REG_RXFIFOSZ, 0x0094), REGDECL(MUSB2_REG_RXFIFOADD, 0x0096), REGDECL(MUSB2_REG_FADDR, 0x0098), REGDECL(MUSB2_REG_TXFADDR(0), 0x0098), REGDECL(MUSB2_REG_TXHADDR(0), 0x009a), REGDECL(MUSB2_REG_TXHUBPORT(0), 0x009b), REGDECL(MUSB2_REG_RXFADDR(0), 0x009c), REGDECL(MUSB2_REG_RXHADDR(0), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(0), 0x009f), REGDECL(MUSB2_REG_TXFADDR(1), 0x0098), REGDECL(MUSB2_REG_TXHADDR(1), 0x009a), REGDECL(MUSB2_REG_TXHUBPORT(1), 0x009b), REGDECL(MUSB2_REG_RXFADDR(1), 0x009c), REGDECL(MUSB2_REG_RXHADDR(1), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(1), 0x009f), REGDECL(MUSB2_REG_TXFADDR(2), 0x0098), REGDECL(MUSB2_REG_TXHADDR(2), 0x009a), 
REGDECL(MUSB2_REG_TXHUBPORT(2), 0x009b), REGDECL(MUSB2_REG_RXFADDR(2), 0x009c), REGDECL(MUSB2_REG_RXHADDR(2), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(2), 0x009f), REGDECL(MUSB2_REG_TXFADDR(3), 0x0098), REGDECL(MUSB2_REG_TXHADDR(3), 0x009a), REGDECL(MUSB2_REG_TXHUBPORT(3), 0x009b), REGDECL(MUSB2_REG_RXFADDR(3), 0x009c), REGDECL(MUSB2_REG_RXHADDR(3), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(3), 0x009f), REGDECL(MUSB2_REG_TXFADDR(4), 0x0098), REGDECL(MUSB2_REG_TXHADDR(4), 0x009a), REGDECL(MUSB2_REG_TXHUBPORT(4), 0x009b), REGDECL(MUSB2_REG_RXFADDR(4), 0x009c), REGDECL(MUSB2_REG_RXHADDR(4), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(4), 0x009f), REGDECL(MUSB2_REG_TXFADDR(5), 0x0098), REGDECL(MUSB2_REG_TXHADDR(5), 0x009a), REGDECL(MUSB2_REG_TXHUBPORT(5), 0x009b), REGDECL(MUSB2_REG_RXFADDR(5), 0x009c), REGDECL(MUSB2_REG_RXHADDR(5), 0x009e), REGDECL(MUSB2_REG_RXHUBPORT(5), 0x009f), REGDECL(MUSB2_REG_CONFDATA, 0x00c0), }; static bus_size_t awusbdrd_reg(bus_size_t o) { bus_size_t v; KASSERT(o < nitems(awusbdrd_regmap), ("%s: Invalid register %#lx", __func__, o)); if (o >= nitems(awusbdrd_regmap)) return (o); v = awusbdrd_regmap[o]; KASSERT((v & REMAPFLAG) != 0, ("%s: reg %#lx not in regmap", __func__, o)); return (v & ~REMAPFLAG); } static int awusbdrd_filt(bus_size_t o) { switch (o) { case MUSB2_REG_MISC: case MUSB2_REG_RXDBDIS: case MUSB2_REG_TXDBDIS: return (1); default: return (0); } } static uint8_t awusbdrd_bs_r_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o) { struct bus_space *bs = t; switch (o) { case MUSB2_REG_HWVERS: return (0); /* no known equivalent */ } return (bus_space_read_1(bs_parent_space(bs), h, awusbdrd_reg(o))); } static uint8_t awusbdrd_bs_r_1_noconf(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o) { /* * There is no confdata register on some SoCs, return the same * magic value as Linux. 
*/ if (o == MUSB2_REG_CONFDATA) return (0xde); return (awusbdrd_bs_r_1(t, h, o)); } static uint16_t awusbdrd_bs_r_2(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o) { struct bus_space *bs = t; if (awusbdrd_filt(o) != 0) return (0); return bus_space_read_2(bs_parent_space(bs), h, awusbdrd_reg(o)); } static void awusbdrd_bs_w_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, uint8_t v) { struct bus_space *bs = t; if (awusbdrd_filt(o) != 0) return; bus_space_write_1(bs_parent_space(bs), h, awusbdrd_reg(o), v); } static void awusbdrd_bs_w_2(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, uint16_t v) { struct bus_space *bs = t; if (awusbdrd_filt(o) != 0) return; bus_space_write_2(bs_parent_space(bs), h, awusbdrd_reg(o), v); } static void awusbdrd_bs_rm_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, uint8_t *d, bus_size_t c) { struct bus_space *bs = t; bus_space_read_multi_1(bs_parent_space(bs), h, awusbdrd_reg(o), d, c); } static void awusbdrd_bs_rm_4(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, uint32_t *d, bus_size_t c) { struct bus_space *bs = t; bus_space_read_multi_4(bs_parent_space(bs), h, awusbdrd_reg(o), d, c); } static void awusbdrd_bs_wm_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, const uint8_t *d, bus_size_t c) { struct bus_space *bs = t; if (awusbdrd_filt(o) != 0) return; bus_space_write_multi_1(bs_parent_space(bs), h, awusbdrd_reg(o), d, c); } static void awusbdrd_bs_wm_4(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o, const uint32_t *d, bus_size_t c) { struct bus_space *bs = t; if (awusbdrd_filt(o) != 0) return; bus_space_write_multi_4(bs_parent_space(bs), h, awusbdrd_reg(o), d, c); } static void awusbdrd_intr(void *arg) { struct awusbdrd_softc *sc = arg; uint8_t intusb; uint16_t inttx, intrx; intusb = MUSB2_READ_1(&sc->sc, MUSB2_REG_INTUSB); inttx = MUSB2_READ_2(&sc->sc, MUSB2_REG_INTTX); intrx = MUSB2_READ_2(&sc->sc, MUSB2_REG_INTRX); if (intusb == 0 && inttx == 0 && intrx == 0) return; if (intusb) 
MUSB2_WRITE_1(&sc->sc, MUSB2_REG_INTUSB, intusb); if (inttx) MUSB2_WRITE_2(&sc->sc, MUSB2_REG_INTTX, inttx); if (intrx) MUSB2_WRITE_2(&sc->sc, MUSB2_REG_INTRX, intrx); musbotg_interrupt(arg, intrx, inttx, intusb); } static int awusbdrd_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner USB DRD"); return (BUS_PROBE_DEFAULT); } static int awusbdrd_attach(device_t dev) { char usb_mode[24]; struct awusbdrd_softc *sc; uint8_t musb_mode; int phy_mode; int error; sc = device_get_softc(dev); sc->flags = ofw_bus_search_compatible(dev, compat_data)->ocd_data; error = bus_alloc_resources(dev, awusbdrd_spec, sc->res); if (error != 0) return (error); musb_mode = MUSB2_HOST_MODE; /* default */ phy_mode = PHY_USB_MODE_HOST; if (OF_getprop(ofw_bus_get_node(dev), "dr_mode", &usb_mode, sizeof(usb_mode)) > 0) { usb_mode[sizeof(usb_mode) - 1] = 0; if (strcasecmp(usb_mode, "host") == 0) { musb_mode = MUSB2_HOST_MODE; phy_mode = PHY_USB_MODE_HOST; } else if (strcasecmp(usb_mode, "peripheral") == 0) { musb_mode = MUSB2_DEVICE_MODE; phy_mode = PHY_USB_MODE_DEVICE; } else if (strcasecmp(usb_mode, "otg") == 0) { /* * XXX phy has PHY_USB_MODE_OTG, but MUSB does not have * it. It's not clear how to propagate mode changes * from phy layer (that detects them) to MUSB. 
*/ musb_mode = MUSB2_DEVICE_MODE; phy_mode = PHY_USB_MODE_DEVICE; } else { device_printf(dev, "Invalid FDT dr_mode: %s\n", usb_mode); } } /* AHB gate clock is required */ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (error != 0) goto fail; /* AHB reset is only present on some SoCs */ (void)hwreset_get_by_ofw_idx(dev, 0, 0, &sc->reset); /* Enable clocks */ error = clk_enable(sc->clk); if (error != 0) { device_printf(dev, "failed to enable clock: %d\n", error); goto fail; } if (sc->reset != NULL) { error = hwreset_deassert(sc->reset); if (error != 0) { device_printf(dev, "failed to de-assert reset: %d\n", error); goto fail; } } /* XXX not sure if this is universally needed. */ (void)phy_get_by_ofw_name(dev, 0, "usb", &sc->phy); if (sc->phy != NULL) { device_printf(dev, "setting phy mode %d\n", phy_mode); if (musb_mode == MUSB2_HOST_MODE) { error = phy_enable(sc->phy); if (error != 0) { device_printf(dev, "Could not enable phy\n"); goto fail; } } error = phy_usb_set_mode(sc->phy, phy_mode); if (error != 0) { device_printf(dev, "Could not set phy mode\n"); goto fail; } } sc->sc.sc_bus.parent = dev; sc->sc.sc_bus.devices = sc->sc.sc_devices; sc->sc.sc_bus.devices_max = MUSB2_MAX_DEVICES; sc->sc.sc_bus.dma_bits = 32; error = usb_bus_mem_alloc_all(&sc->sc.sc_bus, USB_GET_DMA_TAG(dev), NULL); if (error != 0) { error = ENOMEM; goto fail; } #if defined(__arm__) sc->bs.bs_parent = rman_get_bustag(sc->res[0]); #elif defined(__aarch64__) sc->bs.bs_cookie = rman_get_bustag(sc->res[0]); #endif if ((sc->flags & AWUSB_NO_CONFDATA) == AWUSB_NO_CONFDATA) sc->bs.bs_r_1 = awusbdrd_bs_r_1_noconf; else sc->bs.bs_r_1 = awusbdrd_bs_r_1; sc->bs.bs_r_2 = awusbdrd_bs_r_2; sc->bs.bs_w_1 = awusbdrd_bs_w_1; sc->bs.bs_w_2 = awusbdrd_bs_w_2; sc->bs.bs_rm_1 = awusbdrd_bs_rm_1; sc->bs.bs_rm_4 = awusbdrd_bs_rm_4; sc->bs.bs_wm_1 = awusbdrd_bs_wm_1; sc->bs.bs_wm_4 = awusbdrd_bs_wm_4; sc->sc.sc_io_tag = &sc->bs; sc->sc.sc_io_hdl = rman_get_bushandle(sc->res[0]); sc->sc.sc_io_size = 
rman_get_size(sc->res[0]); sc->sc.sc_bus.bdev = device_add_child(dev, "usbus", -1); if (sc->sc.sc_bus.bdev == NULL) { error = ENXIO; goto fail; } device_set_ivars(sc->sc.sc_bus.bdev, &sc->sc.sc_bus); sc->sc.sc_id = 0; sc->sc.sc_platform_data = sc; sc->sc.sc_mode = musb_mode; if (ofw_bus_is_compatible(dev, "allwinner,sun8i-h3-musb")) { sc->sc.sc_ep_cfg = musbotg_ep_allwinner_h3; sc->sc.sc_ep_max = DRD_EP_MAX_H3; } else { sc->sc.sc_ep_cfg = musbotg_ep_allwinner; sc->sc.sc_ep_max = DRD_EP_MAX; } error = bus_setup_intr(dev, sc->res[1], INTR_MPSAFE | INTR_TYPE_BIO, NULL, awusbdrd_intr, sc, &sc->sc.sc_intr_hdl); if (error != 0) goto fail; /* Enable PIO mode */ bus_write_1(sc->res[0], MUSB2_REG_AWIN_VEND0, VEND0_PIO_MODE); #ifdef __arm__ /* Map SRAMD area to USB0 (sun4i/sun7i only) */ switch (allwinner_soc_family()) { case ALLWINNERSOC_SUN4I: case ALLWINNERSOC_SUN7I: a10_map_to_otg(); break; } #endif error = musbotg_init(&sc->sc); if (error != 0) goto fail; error = device_probe_and_attach(sc->sc.sc_bus.bdev); if (error != 0) goto fail; musbotg_vbus_interrupt(&sc->sc, 1); /* XXX VBUS */ return (0); fail: if (sc->phy != NULL) { if (musb_mode == MUSB2_HOST_MODE) (void)phy_disable(sc->phy); phy_release(sc->phy); } if (sc->reset != NULL) { hwreset_assert(sc->reset); hwreset_release(sc->reset); } if (sc->clk != NULL) clk_release(sc->clk); bus_release_resources(dev, awusbdrd_spec, sc->res); return (error); } static int awusbdrd_detach(device_t dev) { struct awusbdrd_softc *sc; device_t bdev; int error; sc = device_get_softc(dev); if (sc->sc.sc_bus.bdev != NULL) { bdev = sc->sc.sc_bus.bdev; device_detach(bdev); device_delete_child(dev, bdev); } musbotg_uninit(&sc->sc); error = bus_teardown_intr(dev, sc->res[1], sc->sc.sc_intr_hdl); if (error != 0) return (error); usb_bus_mem_free_all(&sc->sc.sc_bus, NULL); if (sc->phy != NULL) { if (sc->sc.sc_mode == MUSB2_HOST_MODE) phy_disable(sc->phy); phy_release(sc->phy); } if (sc->reset != NULL) { if (hwreset_assert(sc->reset) != 0) 
device_printf(dev, "failed to assert reset\n"); hwreset_release(sc->reset); } if (sc->clk != NULL) clk_release(sc->clk); bus_release_resources(dev, awusbdrd_spec, sc->res); device_delete_children(dev); return (0); } static device_method_t awusbdrd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, awusbdrd_probe), DEVMETHOD(device_attach, awusbdrd_attach), DEVMETHOD(device_detach, awusbdrd_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t awusbdrd_driver = { .name = "musbotg", .methods = awusbdrd_methods, .size = sizeof(struct awusbdrd_softc), }; DRIVER_MODULE(musbotg, simplebus, awusbdrd_driver, 0, 0); MODULE_DEPEND(musbotg, usb, 1, 1, 1); diff --git a/sys/dev/usb/controller/usb_nop_xceiv.c b/sys/dev/usb/controller/usb_nop_xceiv.c index 3d2583d561cf..25fc13cb0020 100644 --- a/sys/dev/usb/controller/usb_nop_xceiv.c +++ b/sys/dev/usb/controller/usb_nop_xceiv.c @@ -1,206 +1,206 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include "phynode_if.h" struct usb_nop_xceiv_softc { device_t dev; regulator_t vcc_supply; clk_t clk; uint32_t clk_freq; }; static struct ofw_compat_data compat_data[] = { {"usb-nop-xceiv", 1}, {NULL, 0} }; /* Phy class and methods. */ static int usb_nop_xceiv_phy_enable(struct phynode *phy, bool enable); static phynode_usb_method_t usb_nop_xceiv_phynode_methods[] = { PHYNODEMETHOD(phynode_enable, usb_nop_xceiv_phy_enable), PHYNODEMETHOD_END }; DEFINE_CLASS_1(usb_nop_xceiv_phynode, usb_nop_xceiv_phynode_class, usb_nop_xceiv_phynode_methods, sizeof(struct phynode_usb_sc), phynode_usb_class); static int usb_nop_xceiv_phy_enable(struct phynode *phynode, bool enable) { struct usb_nop_xceiv_softc *sc; device_t dev; intptr_t phy; int error; dev = phynode_get_device(phynode); phy = phynode_get_id(phynode); sc = device_get_softc(dev); if (phy != 0) return (ERANGE); /* Enable the phy clock */ if (sc->clk_freq != 0) { if (enable) { error = clk_set_freq(sc->clk, sc->clk_freq, CLK_SET_ROUND_ANY); if (error != 0) { device_printf(dev, "Cannot set clock to %dMhz\n", sc->clk_freq); goto fail; } error = clk_enable(sc->clk); } else error = clk_disable(sc->clk); if (error != 0) { device_printf(dev, "Cannot %sable the clock\n", enable ? 
"En" : "Dis"); goto fail; } } if (sc->vcc_supply) { if (enable) error = regulator_enable(sc->vcc_supply); else error = regulator_disable(sc->vcc_supply); if (error != 0) { device_printf(dev, "Cannot %sable the regulator\n", enable ? "En" : "Dis"); goto fail; } } return (0); fail: return (ENXIO); } static int usb_nop_xceiv_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "USB NOP PHY"); return (BUS_PROBE_DEFAULT); } static int usb_nop_xceiv_attach(device_t dev) { struct usb_nop_xceiv_softc *sc; struct phynode *phynode; struct phynode_init_def phy_init; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); /* Parse the optional properties */ OF_getencprop(node, "clock-frequency", &sc->clk_freq, sizeof(uint32_t)); error = clk_get_by_ofw_name(dev, node, "main_clk", &sc->clk); if (error != 0 && sc->clk_freq != 0) { device_printf(dev, "clock property is mandatory if clock-frequency is present\n"); return (ENXIO); } regulator_get_by_ofw_property(dev, node, "vcc-supply", &sc->vcc_supply); phy_init.id = 0; phy_init.ofw_node = node; phynode = phynode_create(dev, &usb_nop_xceiv_phynode_class, &phy_init); if (phynode == NULL) { device_printf(dev, "failed to create USB NOP PHY\n"); return (ENXIO); } if (phynode_register(phynode) == NULL) { device_printf(dev, "failed to create USB NOP PHY\n"); return (ENXIO); } OF_device_register_xref(OF_xref_from_node(node), dev); return (0); } static int usb_nop_xceiv_detach(device_t dev) { return (EBUSY); } static device_method_t usb_nop_xceiv_methods[] = { /* Device interface */ DEVMETHOD(device_probe, usb_nop_xceiv_probe), DEVMETHOD(device_attach, usb_nop_xceiv_attach), DEVMETHOD(device_detach, usb_nop_xceiv_detach), DEVMETHOD_END }; static driver_t usb_nop_xceiv_driver = { "usb_nop_xceiv", usb_nop_xceiv_methods, sizeof(struct usb_nop_xceiv_softc), }; 
EARLY_DRIVER_MODULE(usb_nop_xceiv, simplebus, usb_nop_xceiv_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/usb/controller/xlnx_dwc3.c b/sys/dev/usb/controller/xlnx_dwc3.c index 0a53fb855034..df91d5e5bc3b 100644 --- a/sys/dev/usb/controller/xlnx_dwc3.c +++ b/sys/dev/usb/controller/xlnx_dwc3.c @@ -1,149 +1,149 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Beckhoff Automation GmbH & Co. KG * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Xilinx DWC3 glue */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include static struct ofw_compat_data compat_data[] = { { "xlnx,zynqmp-dwc3", 1 }, { NULL, 0 } }; struct xlnx_dwc3_softc { struct simplebus_softc sc; device_t dev; hwreset_t rst_crst; hwreset_t rst_hibrst; hwreset_t rst_apbrst; }; static int xlnx_dwc3_probe(device_t dev) { phandle_t node; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* Binding says that we need a child node for the actual dwc3 controller */ node = ofw_bus_get_node(dev); if (OF_child(node) <= 0) return (ENXIO); device_set_desc(dev, "Xilinx ZYNQMP DWC3"); return (BUS_PROBE_DEFAULT); } static int xlnx_dwc3_attach(device_t dev) { struct xlnx_dwc3_softc *sc; device_t cdev; phandle_t node, child; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); /* * Put module out of reset * Based on the bindings this should be mandatory to have * but reality shows that they aren't always there. 
* This is the case on the DTB in the AVnet Ultra96 */ if (hwreset_get_by_ofw_name(dev, node, "usb_crst", &sc->rst_crst) == 0) { if (hwreset_deassert(sc->rst_crst) != 0) { device_printf(dev, "Cannot deassert reset\n"); return (ENXIO); } } if (hwreset_get_by_ofw_name(dev, node, "usb_hibrst", &sc->rst_hibrst) == 0) { if (hwreset_deassert(sc->rst_hibrst) != 0) { device_printf(dev, "Cannot deassert reset\n"); return (ENXIO); } } if (hwreset_get_by_ofw_name(dev, node, "usb_apbrst", &sc->rst_apbrst) == 0) { if (hwreset_deassert(sc->rst_apbrst) != 0) { device_printf(dev, "Cannot deassert reset\n"); return (ENXIO); } } simplebus_init(dev, node); if (simplebus_fill_ranges(node, &sc->sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static device_method_t xlnx_dwc3_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xlnx_dwc3_probe), DEVMETHOD(device_attach, xlnx_dwc3_attach), DEVMETHOD_END }; DEFINE_CLASS_1(xlnx_dwc3, xlnx_dwc3_driver, xlnx_dwc3_methods, sizeof(struct xlnx_dwc3_softc), simplebus_driver); DRIVER_MODULE(xlnx_dwc3, simplebus, xlnx_dwc3_driver, 0, 0); diff --git a/sys/riscv/sifive/fu740_pci_dw.c b/sys/riscv/sifive/fu740_pci_dw.c index 67305f1ee790..ff2078464379 100644 --- a/sys/riscv/sifive/fu740_pci_dw.c +++ b/sys/riscv/sifive/fu740_pci_dw.c @@ -1,460 +1,460 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2021 Jessica Clarke * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* SiFive FU740 DesignWare PCIe driver */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_dw_if.h" #define FUDW_PHYS 2 #define FUDW_LANES_PER_PHY 4 #define FUDW_MGMT_PERST_N 0x0 #define FUDW_MGMT_LTSSM_EN 0x10 #define FUDW_MGMT_HOLD_PHY_RST 0x18 #define FUDW_MGMT_DEVICE_TYPE 0x708 #define FUDW_MGMT_DEVICE_TYPE_RC 0x4 #define FUDW_MGMT_PHY_CR_PARA_REG(_n, _r) \ (0x860 + (_n) * 0x40 + FUDW_MGMT_PHY_CR_PARA_##_r) #define FUDW_MGMT_PHY_CR_PARA_ADDR 0x0 #define FUDW_MGMT_PHY_CR_PARA_READ_EN 0x10 #define FUDW_MGMT_PHY_CR_PARA_READ_DATA 0x18 #define FUDW_MGMT_PHY_CR_PARA_SEL 0x20 #define FUDW_MGMT_PHY_CR_PARA_WRITE_DATA 0x28 #define FUDW_MGMT_PHY_CR_PARA_WRITE_EN 0x30 #define FUDW_MGMT_PHY_CR_PARA_ACK 0x38 #define FUDW_MGMT_PHY_LANE(_n) (0x1008 + (_n) * 0x100) #define FUDW_MGMT_PHY_LANE_CDR_TRACK_EN (1 << 0) #define 
FUDW_MGMT_PHY_LANE_LOS_THRESH (1 << 5) #define FUDW_MGMT_PHY_LANE_TERM_EN (1 << 9) #define FUDW_MGMT_PHY_LANE_TERM_ACDC (1 << 10) #define FUDW_MGMT_PHY_LANE_EN (1 << 11) #define FUDW_MGMT_PHY_LANE_INIT \ (FUDW_MGMT_PHY_LANE_CDR_TRACK_EN | FUDW_MGMT_PHY_LANE_LOS_THRESH | \ FUDW_MGMT_PHY_LANE_TERM_EN | FUDW_MGMT_PHY_LANE_TERM_ACDC | \ FUDW_MGMT_PHY_LANE_EN) #define FUDW_DBI_PORT_DBG1 0x72c #define FUDW_DBI_PORT_DBG1_LINK_UP (1 << 4) #define FUDW_DBI_PORT_DBG1_LINK_IN_TRAINING (1 << 29) struct fupci_softc { struct pci_dw_softc dw_sc; device_t dev; struct resource *mgmt_res; gpio_pin_t porst_pin; gpio_pin_t pwren_pin; clk_t pcie_aux_clk; hwreset_t pcie_aux_rst; }; #define FUDW_MGMT_READ(_sc, _o) bus_read_4((_sc)->mgmt_res, (_o)) #define FUDW_MGMT_WRITE(_sc, _o, _v) bus_write_4((_sc)->mgmt_res, (_o), (_v)) static struct ofw_compat_data compat_data[] = { { "sifive,fu740-pcie", 1 }, { NULL, 0 }, }; /* Currently unused; included for completeness */ static int __unused fupci_phy_read(struct fupci_softc *sc, int phy, uint32_t reg, uint32_t *val) { unsigned timeout; uint32_t ack; FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, ADDR), reg); FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, READ_EN), 1); timeout = 10; do { ack = FUDW_MGMT_READ(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, ACK)); if (ack != 0) break; DELAY(10); } while (--timeout > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout waiting for read ACK\n"); return (ETIMEDOUT); } *val = FUDW_MGMT_READ(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, READ_DATA)); FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, READ_EN), 0); timeout = 10; do { ack = FUDW_MGMT_READ(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, ACK)); if (ack == 0) break; DELAY(10); } while (--timeout > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout waiting for read un-ACK\n"); return (ETIMEDOUT); } return (0); } static int fupci_phy_write(struct fupci_softc *sc, int phy, uint32_t reg, uint32_t val) { unsigned timeout; uint32_t ack; FUDW_MGMT_WRITE(sc, 
FUDW_MGMT_PHY_CR_PARA_REG(phy, ADDR), reg); FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, WRITE_DATA), val); FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, WRITE_EN), 1); timeout = 10; do { ack = FUDW_MGMT_READ(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, ACK)); if (ack != 0) break; DELAY(10); } while (--timeout > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout waiting for write ACK\n"); return (ETIMEDOUT); } FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, WRITE_EN), 0); timeout = 10; do { ack = FUDW_MGMT_READ(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, ACK)); if (ack == 0) break; DELAY(10); } while (--timeout > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout waiting for write un-ACK\n"); return (ETIMEDOUT); } return (0); } static int fupci_phy_init(struct fupci_softc *sc) { device_t dev; int error, phy, lane; dev = sc->dev; /* Assert core power-on reset (active low) */ error = gpio_pin_set_active(sc->porst_pin, false); if (error != 0) { device_printf(dev, "Cannot assert power-on reset: %d\n", error); return (error); } /* Assert PERST_N */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_PERST_N, 0); /* Enable power */ error = gpio_pin_set_active(sc->pwren_pin, true); if (error != 0) { device_printf(dev, "Cannot enable power: %d\n", error); return (error); } /* Hold PERST for 100ms as per the PCIe spec */ DELAY(100); /* Deassert PERST_N */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_PERST_N, 1); /* Deassert core power-on reset (active low) */ error = gpio_pin_set_active(sc->porst_pin, true); if (error != 0) { device_printf(dev, "Cannot deassert power-on reset: %d\n", error); return (error); } /* Enable the aux clock */ error = clk_enable(sc->pcie_aux_clk); if (error != 0) { device_printf(dev, "Cannot enable aux clock: %d\n", error); return (error); } /* Hold LTSSM in reset whilst initialising the PHYs */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_HOLD_PHY_RST, 1); /* Deassert the aux reset */ error = hwreset_deassert(sc->pcie_aux_rst); if (error != 0) { device_printf(dev, "Cannot deassert aux reset: 
%d\n", error); return (error); } /* Enable control register interface */ for (phy = 0; phy < FUDW_PHYS; ++phy) FUDW_MGMT_WRITE(sc, FUDW_MGMT_PHY_CR_PARA_REG(phy, SEL), 1); /* Wait for enable to take effect */ DELAY(1); /* Initialise lane configuration */ for (phy = 0; phy < FUDW_PHYS; ++phy) { for (lane = 0; lane < FUDW_LANES_PER_PHY; ++lane) fupci_phy_write(sc, phy, FUDW_MGMT_PHY_LANE(lane), FUDW_MGMT_PHY_LANE_INIT); } /* Disable the aux clock whilst taking the LTSSM out of reset */ error = clk_disable(sc->pcie_aux_clk); if (error != 0) { device_printf(dev, "Cannot disable aux clock: %d\n", error); return (error); } /* Take LTSSM out of reset */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_HOLD_PHY_RST, 0); /* Enable the aux clock again */ error = clk_enable(sc->pcie_aux_clk); if (error != 0) { device_printf(dev, "Cannot re-enable aux clock: %d\n", error); return (error); } /* Put the controller in Root Complex mode */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_DEVICE_TYPE, FUDW_MGMT_DEVICE_TYPE_RC); return (0); } static void fupci_dbi_protect(struct fupci_softc *sc, bool protect) { uint32_t reg; reg = pci_dw_dbi_rd4(sc->dev, DW_MISC_CONTROL_1); if (protect) reg &= ~DBI_RO_WR_EN; else reg |= DBI_RO_WR_EN; pci_dw_dbi_wr4(sc->dev, DW_MISC_CONTROL_1, reg); } static int fupci_init(struct fupci_softc *sc) { /* Enable 32-bit I/O window */ fupci_dbi_protect(sc, false); pci_dw_dbi_wr2(sc->dev, PCIR_IOBASEL_1, (PCIM_BRIO_32 << 8) | PCIM_BRIO_32); fupci_dbi_protect(sc, true); /* Enable LTSSM */ FUDW_MGMT_WRITE(sc, FUDW_MGMT_LTSSM_EN, 1); return (0); } static int fupci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "SiFive FU740 PCIe Controller"); return (BUS_PROBE_DEFAULT); } static int fupci_attach(device_t dev) { struct fupci_softc *sc; phandle_t node; int error, rid; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->dev = dev; rid = 0; error = 
ofw_bus_find_string_index(node, "reg-names", "dbi", &rid); if (error != 0) { device_printf(dev, "Cannot get DBI memory: %d\n", error); goto fail; } sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->dw_sc.dbi_res == NULL) { device_printf(dev, "Cannot allocate DBI memory\n"); error = ENXIO; goto fail; } rid = 0; error = ofw_bus_find_string_index(node, "reg-names", "mgmt", &rid); if (error != 0) { device_printf(dev, "Cannot get management space memory: %d\n", error); goto fail; } sc->mgmt_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mgmt_res == NULL) { device_printf(dev, "Cannot allocate management space memory\n"); error = ENXIO; goto fail; } error = gpio_pin_get_by_ofw_property(dev, node, "reset-gpios", &sc->porst_pin); /* Old U-Boot device tree uses perstn-gpios */ if (error == ENOENT) error = gpio_pin_get_by_ofw_property(dev, node, "perstn-gpios", &sc->porst_pin); if (error != 0) { device_printf(dev, "Cannot get power-on reset GPIO: %d\n", error); goto fail; } error = gpio_pin_setflags(sc->porst_pin, GPIO_PIN_OUTPUT); if (error != 0) { device_printf(dev, "Cannot configure power-on reset GPIO: %d\n", error); goto fail; } error = gpio_pin_get_by_ofw_property(dev, node, "pwren-gpios", &sc->pwren_pin); if (error != 0) { device_printf(dev, "Cannot get power enable GPIO: %d\n", error); goto fail; } error = gpio_pin_setflags(sc->pwren_pin, GPIO_PIN_OUTPUT); if (error != 0) { device_printf(dev, "Cannot configure power enable GPIO: %d\n", error); goto fail; } error = clk_get_by_ofw_name(dev, node, "pcie_aux", &sc->pcie_aux_clk); /* Old U-Boot device tree uses pcieaux */ if (error == ENOENT) error = clk_get_by_ofw_name(dev, node, "pcieaux", &sc->pcie_aux_clk); if (error != 0) { device_printf(dev, "Cannot get aux clock: %d\n", error); goto fail; } error = hwreset_get_by_ofw_idx(dev, node, 0, &sc->pcie_aux_rst); if (error != 0) { device_printf(dev, "Cannot get aux reset: %d\n", error); goto fail; } error = 
fupci_phy_init(sc); if (error != 0) goto fail; error = pci_dw_init(dev); if (error != 0) goto fail; error = fupci_init(sc); if (error != 0) goto fail; return (bus_generic_attach(dev)); fail: /* XXX Cleanup */ return (error); } static int fupci_get_link(device_t dev, bool *status) { uint32_t reg; reg = pci_dw_dbi_rd4(dev, FUDW_DBI_PORT_DBG1); *status = (reg & FUDW_DBI_PORT_DBG1_LINK_UP) != 0 && (reg & FUDW_DBI_PORT_DBG1_LINK_IN_TRAINING) == 0; return (0); } static device_method_t fupci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fupci_probe), DEVMETHOD(device_attach, fupci_attach), /* PCI DW interface */ DEVMETHOD(pci_dw_get_link, fupci_get_link), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, fupci_driver, fupci_methods, sizeof(struct fupci_softc), pci_dw_driver); DRIVER_MODULE(fu740_pci_dw, simplebus, fupci_driver, NULL, NULL); diff --git a/sys/riscv/sifive/sifive_prci.c b/sys/riscv/sifive/sifive_prci.c index 0065711a62d9..ecfaf936e516 100644 --- a/sys/riscv/sifive/sifive_prci.c +++ b/sys/riscv/sifive/sifive_prci.c @@ -1,698 +1,698 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Axiado Corporation * All rights reserved. * Copyright (c) 2021 Jessica Clarke * * This software was developed in part by Kristof Provost under contract for * Axiado Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include #include "clkdev_if.h" #include "hwreset_if.h" static struct resource_spec prci_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, RESOURCE_SPEC_END }; struct prci_softc { device_t dev; struct mtx mtx; struct clkdom *clkdom; struct resource *res; bus_space_tag_t bst; bus_space_handle_t bsh; int nresets; }; struct prci_clk_pll_sc { struct prci_softc *parent_sc; uint32_t reg; }; struct prci_clk_div_sc { struct prci_softc *parent_sc; uint32_t reg; uint32_t bias; }; #define PRCI_LOCK(sc) mtx_lock(&(sc)->mtx) #define PRCI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define PRCI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED); #define PRCI_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED); #define PRCI_PLL_DIVR_MASK 0x3f #define PRCI_PLL_DIVR_SHIFT 0 #define PRCI_PLL_DIVF_MASK 0x7fc0 #define PRCI_PLL_DIVF_SHIFT 6 #define PRCI_PLL_DIVQ_MASK 0x38000 #define PRCI_PLL_DIVQ_SHIFT 15 /* Called devicesresetreg on the FU540 */ #define PRCI_DEVICES_RESET_N 0x28 #define PRCI_READ(_sc, _reg) \ bus_space_read_4((_sc)->bst, (_sc)->bsh, (_reg)) #define PRCI_WRITE(_sc, 
_reg, _val) \ bus_space_write_4((_sc)->bst, (_sc)->bsh, (_reg), (_val)) struct prci_pll_def { uint32_t id; const char *name; uint32_t reg; }; #define PLL(_id, _name, _base) \ { \ .id = (_id), \ .name = (_name), \ .reg = (_base), \ } #define PLL_END PLL(0, NULL, 0) struct prci_div_def { uint32_t id; const char *name; const char *parent_name; uint32_t reg; uint32_t bias; }; #define DIV(_id, _name, _parent_name, _base, _bias) \ { \ .id = (_id), \ .name = (_name), \ .parent_name = (_parent_name), \ .reg = (_base), \ .bias = (_bias), \ } #define DIV_END DIV(0, NULL, NULL, 0, 0) struct prci_gate_def { uint32_t id; const char *name; const char *parent_name; uint32_t reg; }; #define GATE(_id, _name, _parent_name, _base) \ { \ .id = (_id), \ .name = (_name), \ .parent_name = (_parent_name), \ .reg = (_base), \ } #define GATE_END GATE(0, NULL, NULL, 0) struct prci_config { struct prci_pll_def *pll_clks; struct prci_div_def *div_clks; struct prci_gate_def *gate_clks; struct clk_fixed_def *tlclk_def; int nresets; }; /* FU540 clock numbers */ #define FU540_PRCI_CORECLK 0 #define FU540_PRCI_DDRCLK 1 #define FU540_PRCI_GEMGXLCLK 2 #define FU540_PRCI_TLCLK 3 /* FU540 registers */ #define FU540_PRCI_COREPLL_CFG0 0x4 #define FU540_PRCI_DDRPLL_CFG0 0xC #define FU540_PRCI_GEMGXLPLL_CFG0 0x1C /* FU540 PLL clocks */ static struct prci_pll_def fu540_pll_clks[] = { PLL(FU540_PRCI_CORECLK, "coreclk", FU540_PRCI_COREPLL_CFG0), PLL(FU540_PRCI_DDRCLK, "ddrclk", FU540_PRCI_DDRPLL_CFG0), PLL(FU540_PRCI_GEMGXLCLK, "gemgxlclk", FU540_PRCI_GEMGXLPLL_CFG0), PLL_END }; /* FU540 fixed divisor clock TLCLK. 
*/ static struct clk_fixed_def fu540_tlclk_def = { .clkdef.id = FU540_PRCI_TLCLK, .clkdef.name = "tlclk", .clkdef.parent_names = (const char *[]){"coreclk"}, .clkdef.parent_cnt = 1, .clkdef.flags = CLK_NODE_STATIC_STRINGS, .mult = 1, .div = 2, }; /* FU540 config */ struct prci_config fu540_prci_config = { .pll_clks = fu540_pll_clks, .tlclk_def = &fu540_tlclk_def, .nresets = 6, }; /* FU740 clock numbers */ #define FU740_PRCI_CORECLK 0 #define FU740_PRCI_DDRCLK 1 #define FU740_PRCI_GEMGXLCLK 2 #define FU740_PRCI_DVFSCORECLK 3 #define FU740_PRCI_HFPCLK 4 #define FU740_PRCI_CLTXCLK 5 #define FU740_PRCI_TLCLK 6 #define FU740_PRCI_PCLK 7 #define FU740_PRCI_PCIEAUXCLK 8 /* FU740 registers */ #define FU740_PRCI_COREPLL_CFG0 0x4 #define FU740_PRCI_DDRPLL_CFG0 0xC #define FU740_PRCI_PCIEAUX_GATE 0x14 #define FU740_PRCI_GEMGXLPLL_CFG0 0x1C #define FU740_PRCI_DVFSCOREPLL_CFG0 0x38 #define FU740_PRCI_HFPCLKPLL_CFG0 0x50 #define FU740_PRCI_CLTXPLL_CFG0 0x30 #define FU740_PRCI_HFPCLK_DIV 0x5C /* FU740 PLL clocks */ static struct prci_pll_def fu740_pll_clks[] = { PLL(FU740_PRCI_CORECLK, "coreclk", FU740_PRCI_COREPLL_CFG0), PLL(FU740_PRCI_DDRCLK, "ddrclk", FU740_PRCI_DDRPLL_CFG0), PLL(FU740_PRCI_GEMGXLCLK, "gemgxlclk", FU740_PRCI_GEMGXLPLL_CFG0), PLL(FU740_PRCI_DVFSCORECLK, "dvfscoreclk", FU740_PRCI_DVFSCOREPLL_CFG0), PLL(FU740_PRCI_HFPCLK, "hfpclk", FU740_PRCI_HFPCLKPLL_CFG0), PLL(FU740_PRCI_CLTXCLK, "cltxclk", FU740_PRCI_CLTXPLL_CFG0), PLL_END }; /* FU740 divisor clocks */ static struct prci_div_def fu740_div_clks[] = { DIV(FU740_PRCI_PCLK, "pclk", "hfpclk", FU740_PRCI_HFPCLK_DIV, 2), DIV_END }; /* FU740 gated clocks */ static struct prci_gate_def fu740_gate_clks[] = { GATE(FU740_PRCI_PCIEAUXCLK, "pcieauxclk", "hfclk", FU740_PRCI_PCIEAUX_GATE), GATE_END }; /* FU740 fixed divisor clock TLCLK. 
*/ static struct clk_fixed_def fu740_tlclk_def = { .clkdef.id = FU740_PRCI_TLCLK, .clkdef.name = "tlclk", .clkdef.parent_names = (const char *[]){"coreclk"}, .clkdef.parent_cnt = 1, .clkdef.flags = CLK_NODE_STATIC_STRINGS, .mult = 1, .div = 2, }; /* FU740 config */ struct prci_config fu740_prci_config = { .pll_clks = fu740_pll_clks, .div_clks = fu740_div_clks, .gate_clks = fu740_gate_clks, .tlclk_def = &fu740_tlclk_def, .nresets = 7, }; static struct ofw_compat_data compat_data[] = { { "sifive,aloeprci0", (uintptr_t)&fu540_prci_config }, { "sifive,ux00prci0", (uintptr_t)&fu540_prci_config }, { "sifive,fu540-c000-prci", (uintptr_t)&fu540_prci_config }, { "sifive,fu740-c000-prci", (uintptr_t)&fu740_prci_config }, { NULL, 0 }, }; static int prci_clk_pll_init(struct clknode *clk, device_t dev) { clknode_init_parent_idx(clk, 0); return (0); } static int prci_clk_pll_recalc(struct clknode *clk, uint64_t *freq) { struct prci_clk_pll_sc *sc; struct clknode *parent_clk; uint32_t val; uint64_t refclk, divf, divq, divr; int err; KASSERT(freq != NULL, ("freq cannot be NULL")); sc = clknode_get_softc(clk); PRCI_LOCK(sc->parent_sc); /* Get refclock frequency. 
 */
	parent_clk = clknode_get_parent(clk);
	err = clknode_get_freq(parent_clk, &refclk);
	if (err) {
		device_printf(sc->parent_sc->dev,
		    "Failed to get refclk frequency\n");
		PRCI_UNLOCK(sc->parent_sc);
		return (err);
	}

	/*
	 * Calculate the PLL output from the divider fields of the PLL
	 * config register:
	 *
	 *	Fout = Fref / (divr + 1) * 2 * (divf + 1) / 2^divq
	 *
	 * (divr is the reference divider, divf the feedback multiplier,
	 * divq the output divider.)
	 */
	val = PRCI_READ(sc->parent_sc, sc->reg);

	divf = (val & PRCI_PLL_DIVF_MASK) >> PRCI_PLL_DIVF_SHIFT;
	divq = (val & PRCI_PLL_DIVQ_MASK) >> PRCI_PLL_DIVQ_SHIFT;
	divr = (val & PRCI_PLL_DIVR_MASK) >> PRCI_PLL_DIVR_SHIFT;

	*freq = refclk / (divr + 1) * (2 * (divf + 1)) / (1 << divq);

	PRCI_UNLOCK(sc->parent_sc);

	return (0);
}

static clknode_method_t prci_clk_pll_clknode_methods[] = {
	CLKNODEMETHOD(clknode_init,		prci_clk_pll_init),
	CLKNODEMETHOD(clknode_recalc_freq,	prci_clk_pll_recalc),
	CLKNODEMETHOD_END
};

DEFINE_CLASS_1(prci_clk_pll_clknode, prci_clk_pll_clknode_class,
    prci_clk_pll_clknode_methods, sizeof(struct prci_clk_pll_sc),
    clknode_class);

/* Divider clocks have a single (fixed) parent; select it at init. */
static int
prci_clk_div_init(struct clknode *clk, device_t dev)
{

	clknode_init_parent_idx(clk, 0);

	return (0);
}

/*
 * Recalculate a divider clock's frequency: parent frequency divided by
 * the hardware divisor register plus a per-clock bias (e.g. the FU740
 * pclk divider encodes divide-by-(reg + 2)).
 */
static int
prci_clk_div_recalc(struct clknode *clk, uint64_t *freq)
{
	struct prci_clk_div_sc *sc;
	struct clknode *parent_clk;
	uint32_t div;
	uint64_t refclk;
	int err;

	KASSERT(freq != NULL, ("freq cannot be NULL"));

	sc = clknode_get_softc(clk);

	PRCI_LOCK(sc->parent_sc);

	/* Get refclock frequency.
*/ parent_clk = clknode_get_parent(clk); err = clknode_get_freq(parent_clk, &refclk); if (err) { device_printf(sc->parent_sc->dev, "Failed to get refclk frequency\n"); PRCI_UNLOCK(sc->parent_sc); return (err); } /* Calculate the divisor output */ div = PRCI_READ(sc->parent_sc, sc->reg); *freq = refclk / (div + sc->bias); PRCI_UNLOCK(sc->parent_sc); return (0); } static clknode_method_t prci_clk_div_clknode_methods[] = { CLKNODEMETHOD(clknode_init, prci_clk_div_init), CLKNODEMETHOD(clknode_recalc_freq, prci_clk_div_recalc), CLKNODEMETHOD_END }; DEFINE_CLASS_1(prci_clk_div_clknode, prci_clk_div_clknode_class, prci_clk_div_clknode_methods, sizeof(struct prci_clk_div_sc), clknode_class); static int prci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "SiFive Power Reset Clocking Interrupt"); return (BUS_PROBE_DEFAULT); } static void prci_pll_register(struct prci_softc *parent_sc, struct clknode_init_def *clkdef, uint32_t reg) { struct clknode *clk; struct prci_clk_pll_sc *sc; clk = clknode_create(parent_sc->clkdom, &prci_clk_pll_clknode_class, clkdef); if (clk == NULL) panic("Failed to create clknode"); sc = clknode_get_softc(clk); sc->parent_sc = parent_sc; sc->reg = reg; clknode_register(parent_sc->clkdom, clk); } static void prci_div_register(struct prci_softc *parent_sc, struct clknode_init_def *clkdef, uint32_t reg, uint32_t bias) { struct clknode *clk; struct prci_clk_div_sc *sc; clk = clknode_create(parent_sc->clkdom, &prci_clk_div_clknode_class, clkdef); if (clk == NULL) panic("Failed to create clknode"); sc = clknode_get_softc(clk); sc->parent_sc = parent_sc; sc->reg = reg; sc->bias = bias; clknode_register(parent_sc->clkdom, clk); } static int prci_attach(device_t dev) { struct clknode_init_def clkdef, clkdef_div; struct clk_gate_def clkdef_gate; struct prci_softc *sc; clk_t clk_parent; phandle_t node; int i, ncells, error; struct 
prci_config *cfg; struct prci_pll_def *pll_clk; struct prci_div_def *div_clk; struct prci_gate_def *gate_clk; sc = device_get_softc(dev); sc->dev = dev; cfg = (struct prci_config *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; mtx_init(&sc->mtx, device_get_nameunit(sc->dev), NULL, MTX_DEF); error = bus_alloc_resources(dev, prci_spec, &sc->res); if (error) { device_printf(dev, "Couldn't allocate resources\n"); goto fail; } sc->bst = rman_get_bustag(sc->res); sc->bsh = rman_get_bushandle(sc->res); node = ofw_bus_get_node(dev); error = ofw_bus_parse_xref_list_get_length(node, "clocks", "#clock-cells", &ncells); if (error != 0 || ncells < 1) { device_printf(dev, "couldn't find parent clock\n"); goto fail; } bzero(&clkdef, sizeof(clkdef)); clkdef.parent_names = mallocarray(ncells, sizeof(char *), M_OFWPROP, M_WAITOK); for (i = 0; i < ncells; i++) { error = clk_get_by_ofw_index(dev, 0, i, &clk_parent); if (error != 0) { device_printf(dev, "cannot get clock %d\n", error); goto fail1; } clkdef.parent_names[i] = clk_get_name(clk_parent); if (bootverbose) device_printf(dev, "clk parent: %s\n", clkdef.parent_names[i]); clk_release(clk_parent); } clkdef.parent_cnt = ncells; sc->clkdom = clkdom_create(dev); if (sc->clkdom == NULL) { device_printf(dev, "Couldn't create clock domain\n"); goto fail; } /* We can't free a clkdom, so from now on we cannot fail. 
*/ for (pll_clk = cfg->pll_clks; pll_clk->name; pll_clk++) { clkdef.id = pll_clk->id; clkdef.name = pll_clk->name; prci_pll_register(sc, &clkdef, pll_clk->reg); } if (cfg->div_clks != NULL) { bzero(&clkdef_div, sizeof(clkdef_div)); for (div_clk = cfg->div_clks; div_clk->name; div_clk++) { clkdef_div.id = div_clk->id; clkdef_div.name = div_clk->name; clkdef_div.parent_names = &div_clk->parent_name; clkdef_div.parent_cnt = 1; prci_div_register(sc, &clkdef_div, div_clk->reg, div_clk->bias); } } if (cfg->gate_clks != NULL) { bzero(&clkdef_gate, sizeof(clkdef_gate)); for (gate_clk = cfg->gate_clks; gate_clk->name; gate_clk++) { clkdef_gate.clkdef.id = gate_clk->id; clkdef_gate.clkdef.name = gate_clk->name; clkdef_gate.clkdef.parent_names = &gate_clk->parent_name; clkdef_gate.clkdef.parent_cnt = 1; clkdef_gate.offset = gate_clk->reg; clkdef_gate.shift = 0; clkdef_gate.mask = 1; clkdef_gate.on_value = 1; clkdef_gate.off_value = 0; error = clknode_gate_register(sc->clkdom, &clkdef_gate); if (error != 0) { device_printf(dev, "Couldn't create gated clock %s: %d\n", gate_clk->name, error); goto fail; } } } /* * Register the fixed clock "tlclk". * * If an older device tree is being used, tlclk may appear as its own * entity in the device tree, under soc/tlclk. If this is the case it * will be registered automatically by the fixed_clk driver, and the * version we register here will be an unreferenced duplicate. 
*/ clknode_fixed_register(sc->clkdom, cfg->tlclk_def); error = clkdom_finit(sc->clkdom); if (error) panic("Couldn't finalise clock domain"); sc->nresets = cfg->nresets; return (0); fail1: free(clkdef.parent_names, M_OFWPROP); fail: bus_release_resources(dev, prci_spec, &sc->res); mtx_destroy(&sc->mtx); return (error); } static int prci_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct prci_softc *sc; sc = device_get_softc(dev); PRCI_WRITE(sc, addr, val); return (0); } static int prci_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct prci_softc *sc; sc = device_get_softc(dev); *val = PRCI_READ(sc, addr); return (0); } static int prci_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct prci_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = PRCI_READ(sc, addr); reg &= ~clr; reg |= set; PRCI_WRITE(sc, addr, reg); return (0); } static void prci_device_lock(device_t dev) { struct prci_softc *sc; sc = device_get_softc(dev); PRCI_LOCK(sc); } static void prci_device_unlock(device_t dev) { struct prci_softc *sc; sc = device_get_softc(dev); PRCI_UNLOCK(sc); } static int prci_reset_assert(device_t dev, intptr_t id, bool reset) { struct prci_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (id >= sc->nresets) return (ENXIO); PRCI_LOCK(sc); reg = PRCI_READ(sc, PRCI_DEVICES_RESET_N); if (reset) reg &= ~(1u << id); else reg |= (1u << id); PRCI_WRITE(sc, PRCI_DEVICES_RESET_N, reg); PRCI_UNLOCK(sc); return (0); } static int prci_reset_is_asserted(device_t dev, intptr_t id, bool *reset) { struct prci_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (id >= sc->nresets) return (ENXIO); PRCI_LOCK(sc); reg = PRCI_READ(sc, PRCI_DEVICES_RESET_N); *reset = (reg & (1u << id)) == 0; PRCI_UNLOCK(sc); return (0); } static device_method_t prci_methods[] = { DEVMETHOD(device_probe, prci_probe), DEVMETHOD(device_attach, prci_attach), /* clkdev interface */ DEVMETHOD(clkdev_write_4, prci_write_4), DEVMETHOD(clkdev_read_4, 
prci_read_4), DEVMETHOD(clkdev_modify_4, prci_modify_4), DEVMETHOD(clkdev_device_lock, prci_device_lock), DEVMETHOD(clkdev_device_unlock, prci_device_unlock), /* Reset interface */ DEVMETHOD(hwreset_assert, prci_reset_assert), DEVMETHOD(hwreset_is_asserted, prci_reset_is_asserted), DEVMETHOD_END }; static driver_t prci_driver = { "sifive_prci", prci_methods, sizeof(struct prci_softc) }; /* * hfclk and rtcclk appear later in the device tree than prci, so we must * attach late. */ EARLY_DRIVER_MODULE(sifive_prci, simplebus, prci_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_LATE); diff --git a/sys/riscv/sifive/sifive_spi.c b/sys/riscv/sifive/sifive_spi.c index e27859954408..df6e50ba21ae 100644 --- a/sys/riscv/sifive/sifive_spi.c +++ b/sys/riscv/sifive/sifive_spi.c @@ -1,400 +1,400 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Axiado Corporation * All rights reserved. * * This software was developed in part by Philip Paeps and Kristof Provost * under contract for Axiado Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include "spibus_if.h" #if 1 #define DBGPRINT(dev, fmt, args...) \ device_printf(dev, "%s: " fmt "\n", __func__, ## args) #else #define DBGPRINT(dev, fmt, args...) #endif static struct resource_spec sfspi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, RESOURCE_SPEC_END }; struct sfspi_softc { device_t dev; device_t parent; struct mtx mtx; struct resource *res; bus_space_tag_t bst; bus_space_handle_t bsh; void *ih; clk_t clk; uint64_t freq; uint32_t cs_max; }; #define SFSPI_LOCK(sc) mtx_lock(&(sc)->mtx) #define SFSPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define SFSPI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED); #define SFSPI_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED); /* * Register offsets. * From Sifive-Unleashed-FU540-C000-v1.0.pdf page 101. 
*/ #define SFSPI_REG_SCKDIV 0x00 /* Serial clock divisor */ #define SFSPI_REG_SCKMODE 0x04 /* Serial clock mode */ #define SFSPI_REG_CSID 0x10 /* Chip select ID */ #define SFSPI_REG_CSDEF 0x14 /* Chip select default */ #define SFSPI_REG_CSMODE 0x18 /* Chip select mode */ #define SFSPI_REG_DELAY0 0x28 /* Delay control 0 */ #define SFSPI_REG_DELAY1 0x2C /* Delay control 1 */ #define SFSPI_REG_FMT 0x40 /* Frame format */ #define SFSPI_REG_TXDATA 0x48 /* Tx FIFO data */ #define SFSPI_REG_RXDATA 0x4C /* Rx FIFO data */ #define SFSPI_REG_TXMARK 0x50 /* Tx FIFO watermark */ #define SFSPI_REG_RXMARK 0x54 /* Rx FIFO watermark */ #define SFSPI_REG_FCTRL 0x60 /* SPI flash interface control* */ #define SFSPI_REG_FFMT 0x64 /* SPI flash instruction format* */ #define SFSPI_REG_IE 0x70 /* SPI interrupt enable */ #define SFSPI_REG_IP 0x74 /* SPI interrupt pending */ #define SFSPI_SCKDIV_MASK 0xfff #define SFSPI_CSDEF_ALL ((1 << sc->cs_max)-1) #define SFSPI_CSMODE_AUTO 0x0U #define SFSPI_CSMODE_HOLD 0x2U #define SFSPI_CSMODE_OFF 0x3U #define SFSPI_TXDATA_DATA_MASK 0xff #define SFSPI_TXDATA_FULL (1 << 31) #define SFSPI_RXDATA_DATA_MASK 0xff #define SFSPI_RXDATA_EMPTY (1 << 31) #define SFSPI_SCKMODE_PHA (1 << 0) #define SFSPI_SCKMODE_POL (1 << 1) #define SFSPI_FMT_PROTO_SINGLE 0x0U #define SFSPI_FMT_PROTO_DUAL 0x1U #define SFSPI_FMT_PROTO_QUAD 0x2U #define SFSPI_FMT_PROTO_MASK 0x3U #define SFSPI_FMT_ENDIAN (1 << 2) #define SFSPI_FMT_DIR (1 << 3) #define SFSPI_FMT_LEN(x) ((uint32_t)(x) << 16) #define SFSPI_FMT_LEN_MASK (0xfU << 16) #define SFSPI_FIFO_DEPTH 8 #define SFSPI_READ(_sc, _reg) \ bus_space_read_4((_sc)->bst, (_sc)->bsh, (_reg)) #define SFSPI_WRITE(_sc, _reg, _val) \ bus_space_write_4((_sc)->bst, (_sc)->bsh, (_reg), (_val)) static void sfspi_tx(struct sfspi_softc *sc, uint8_t *buf, uint32_t bufsiz) { uint32_t val; uint8_t *p, *end; KASSERT(buf != NULL, ("TX buffer cannot be NULL")); end = buf + bufsiz; for (p = buf; p < end; p++) { do { val = SFSPI_READ(sc, SFSPI_REG_TXDATA); 
} while (val & SFSPI_TXDATA_FULL); val = *p; SFSPI_WRITE(sc, SFSPI_REG_TXDATA, val); } } static void sfspi_rx(struct sfspi_softc *sc, uint8_t *buf, uint32_t bufsiz) { uint32_t val; uint8_t *p, *end; KASSERT(buf != NULL, ("RX buffer cannot be NULL")); KASSERT(bufsiz <= SFSPI_FIFO_DEPTH, ("Cannot receive more than %d bytes at a time\n", SFSPI_FIFO_DEPTH)); end = buf + bufsiz; for (p = buf; p < end; p++) { do { val = SFSPI_READ(sc, SFSPI_REG_RXDATA); } while (val & SFSPI_RXDATA_EMPTY); *p = val & SFSPI_RXDATA_DATA_MASK; }; } static int sfspi_xfer_buf(struct sfspi_softc *sc, uint8_t *rxbuf, uint8_t *txbuf, uint32_t txlen, uint32_t rxlen) { uint32_t bytes; KASSERT(txlen == rxlen, ("TX and RX lengths must be equal")); KASSERT(rxbuf != NULL, ("RX buffer cannot be NULL")); KASSERT(txbuf != NULL, ("TX buffer cannot be NULL")); while (txlen) { bytes = (txlen > SFSPI_FIFO_DEPTH) ? SFSPI_FIFO_DEPTH : txlen; sfspi_tx(sc, txbuf, bytes); txbuf += bytes; sfspi_rx(sc, rxbuf, bytes); rxbuf += bytes; txlen -= bytes; } return (0); } static int sfspi_setup(struct sfspi_softc *sc, uint32_t cs, uint32_t mode, uint32_t freq) { uint32_t csmode, fmt, sckdiv, sckmode; SFSPI_ASSERT_LOCKED(sc); /* * Fsck = Fin / 2 * (div + 1) * -> div = Fin / (2 * Fsck) - 1 */ sckdiv = (howmany(sc->freq >> 1, freq) - 1) & SFSPI_SCKDIV_MASK; SFSPI_WRITE(sc, SFSPI_REG_SCKDIV, sckdiv); switch (mode) { case SPIBUS_MODE_NONE: sckmode = 0; break; case SPIBUS_MODE_CPHA: sckmode = SFSPI_SCKMODE_PHA; break; case SPIBUS_MODE_CPOL: sckmode = SFSPI_SCKMODE_POL; break; case SPIBUS_MODE_CPOL_CPHA: sckmode = SFSPI_SCKMODE_PHA | SFSPI_SCKMODE_POL; break; default: return (EINVAL); } SFSPI_WRITE(sc, SFSPI_REG_SCKMODE, sckmode); csmode = SFSPI_CSMODE_HOLD; if (cs & SPIBUS_CS_HIGH) csmode = SFSPI_CSMODE_AUTO; SFSPI_WRITE(sc, SFSPI_REG_CSMODE, csmode); SFSPI_WRITE(sc, SFSPI_REG_CSID, cs & ~SPIBUS_CS_HIGH); fmt = SFSPI_FMT_PROTO_SINGLE | SFSPI_FMT_LEN(8); SFSPI_WRITE(sc, SFSPI_REG_FMT, fmt); return (0); } static int 
sfspi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct sfspi_softc *sc;
	uint32_t clock, cs, csdef, mode;
	int err;

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("TX and RX command sizes must be equal"));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("TX and RX data sizes must be equal"));

	sc = device_get_softc(dev);
	spibus_get_cs(child, &cs);
	spibus_get_clock(child, &clock);
	spibus_get_mode(child, &mode);

	if (cs > sc->cs_max) {
		device_printf(sc->dev, "Invalid chip select %u\n", cs);
		return (EINVAL);
	}

	SFSPI_LOCK(sc);
	device_busy(sc->dev);

	err = sfspi_setup(sc, cs, mode, clock);
	if (err != 0) {
		/*
		 * Bug fix: the busy reference taken above was leaked on
		 * this error path; drop it before returning.
		 */
		device_unbusy(sc->dev);
		SFSPI_UNLOCK(sc);
		return (err);
	}

	err = 0;
	if (cmd->tx_cmd_sz > 0)
		err = sfspi_xfer_buf(sc, cmd->rx_cmd, cmd->tx_cmd,
		    cmd->tx_cmd_sz, cmd->rx_cmd_sz);
	if (cmd->tx_data_sz > 0 && err == 0)
		err = sfspi_xfer_buf(sc, cmd->rx_data, cmd->tx_data,
		    cmd->tx_data_sz, cmd->rx_data_sz);

	/* Deassert chip select. */
	csdef = SFSPI_CSDEF_ALL & ~(1 << cs);
	SFSPI_WRITE(sc, SFSPI_REG_CSDEF, csdef);
	SFSPI_WRITE(sc, SFSPI_REG_CSDEF, SFSPI_CSDEF_ALL);

	device_unbusy(sc->dev);
	SFSPI_UNLOCK(sc);

	return (err);
}

static int
sfspi_attach(device_t dev)
{
	struct sfspi_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev), NULL, MTX_DEF);

	error = bus_alloc_resources(dev, sfspi_spec, &sc->res);
	if (error) {
		device_printf(dev, "Couldn't allocate resources\n");
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk);
	if (error) {
		device_printf(dev, "Couldn't allocate clock: %d\n", error);
		goto fail;
	}

	error = clk_enable(sc->clk);
	if (error) {
		device_printf(dev, "Couldn't enable clock: %d\n", error);
		goto fail;
	}

	error = clk_get_freq(sc->clk, &sc->freq);
	if (error) {
		device_printf(sc->dev, "Couldn't get frequency: %d\n", error);
		goto fail;
	}

	/*
	 * From Sifive-Unleashed-FU540-C000-v1.0.pdf page 103:
	 * csdef is cs_width bits wide and all ones on reset.
*/ sc->cs_max = SFSPI_READ(sc, SFSPI_REG_CSDEF); /* * We don't support the direct-mapped flash interface. * Disable it. */ SFSPI_WRITE(sc, SFSPI_REG_FCTRL, 0x0); /* Probe and attach the spibus when interrupts are available. */ sc->parent = device_add_child(dev, "spibus", -1); config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); fail: bus_release_resources(dev, sfspi_spec, &sc->res); mtx_destroy(&sc->mtx); return (error); } static int sfspi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "sifive,spi0")) return (ENXIO); device_set_desc(dev, "SiFive SPI controller"); return (BUS_PROBE_DEFAULT); } static phandle_t sfspi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static device_method_t sfspi_methods[] = { DEVMETHOD(device_probe, sfspi_probe), DEVMETHOD(device_attach, sfspi_attach), DEVMETHOD(spibus_transfer, sfspi_transfer), DEVMETHOD(ofw_bus_get_node, sfspi_get_node), DEVMETHOD_END }; static driver_t sfspi_driver = { "sifive_spi", sfspi_methods, sizeof(struct sfspi_softc) }; DRIVER_MODULE(sifive_spi, simplebus, sfspi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, sifive_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(sifive_spi, ofw_spibus, 1, 1, 1); diff --git a/sys/riscv/sifive/sifive_uart.c b/sys/riscv/sifive/sifive_uart.c index 594773cd3263..0baa32f183ec 100644 --- a/sys/riscv/sifive/sifive_uart.c +++ b/sys/riscv/sifive/sifive_uart.c @@ -1,546 +1,546 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Axiado Corporation * All rights reserved. * * This software was developed in part by Kristof Provost under contract for * Axiado Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include "uart_if.h" #define SFUART_TXDATA 0x00 #define SFUART_TXDATA_FULL (1 << 31) #define SFUART_RXDATA 0x04 #define SFUART_RXDATA_EMPTY (1 << 31) #define SFUART_TXCTRL 0x08 #define SFUART_TXCTRL_ENABLE 0x01 #define SFUART_TXCTRL_NSTOP 0x02 #define SFUART_TXCTRL_TXCNT 0x70000 #define SFUART_TXCTRL_TXCNT_SHIFT 16 #define SFUART_RXCTRL 0x0c #define SFUART_RXCTRL_ENABLE 0x01 #define SFUART_RXCTRL_RXCNT 0x70000 #define SFUART_RXCTRL_RXCNT_SHIFT 16 #define SFUART_IRQ_ENABLE 0x10 #define SFUART_IRQ_ENABLE_TXWM 0x01 #define SFUART_IRQ_ENABLE_RXWM 0x02 #define SFUART_IRQ_PENDING 0x14 #define SFUART_IRQ_PENDING_TXWM 0x01 #define SFUART_IRQ_PENDING_RXQM 0x02 #define SFUART_DIV 0x18 #define SFUART_REGS_SIZE 0x1c #define SFUART_RX_FIFO_DEPTH 8 #define SFUART_TX_FIFO_DEPTH 8 struct sfuart_softc { struct uart_softc uart_softc; clk_t clk; }; static int 
sfuart_probe(struct uart_bas *bas)
{

	/* All SFUART registers are 32 bits wide. */
	bas->regiowidth = 4;

	return (0);
}

/*
 * Low-level console init: program TX/RX control and watermarks.
 * baudrate/databits/parity are intentionally ignored here; DIV is left
 * untouched (see comment at the bottom).
 */
static void
sfuart_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
    int parity)
{
	uint32_t reg;

	/* Mask all UART interrupts while reconfiguring. */
	uart_setreg(bas, SFUART_IRQ_ENABLE, 0);

	/* Enable RX and configure the watermark so that we get an interrupt
	 * when a single character arrives (if interrupts are enabled). */
	reg = SFUART_RXCTRL_ENABLE;
	reg |= (0 << SFUART_RXCTRL_RXCNT_SHIFT);
	uart_setreg(bas, SFUART_RXCTRL, reg);

	/* Enable TX and configure the watermark so that we get an interrupt
	 * when there's room for one more character in the TX fifo (if
	 * interrupts are enabled). */
	reg = SFUART_TXCTRL_ENABLE;
	reg |= (1 << SFUART_TXCTRL_TXCNT_SHIFT);
	if (stopbits == 2)
		reg |= SFUART_TXCTRL_NSTOP;
	uart_setreg(bas, SFUART_TXCTRL, reg);

	/* Don't touch DIV. Assume that's set correctly until we can
	 * reconfigure. */
}

/* Blocking single-character transmit: spin until the TX FIFO has room. */
static void
sfuart_putc(struct uart_bas *bas, int c)
{

	while ((uart_getreg(bas, SFUART_TXDATA) &
	    SFUART_TXDATA_FULL) != 0)
		cpu_spinwait();

	uart_setreg(bas, SFUART_TXDATA, c);
}

static int
sfuart_rxready(struct uart_bas *bas)
{
	/*
	 * Unfortunately the FIFO empty flag is in the FIFO data register so
	 * reading it would dequeue the character. Instead, rely on the fact
	 * we've configured the watermark to be 0 and that interrupts are off
	 * when using the low-level console function, and read the interrupt
	 * pending state instead.
*/ return ((uart_getreg(bas, SFUART_IRQ_PENDING) & SFUART_IRQ_PENDING_RXQM) != 0); } static int sfuart_getc(struct uart_bas *bas, struct mtx *hwmtx) { int c; uart_lock(hwmtx); while (((c = uart_getreg(bas, SFUART_RXDATA)) & SFUART_RXDATA_EMPTY) != 0) { uart_unlock(hwmtx); DELAY(4); uart_lock(hwmtx); } uart_unlock(hwmtx); return (c & 0xff); } static int sfuart_bus_probe(struct uart_softc *sc) { int error; error = sfuart_probe(&sc->sc_bas); if (error) return (error); sc->sc_rxfifosz = SFUART_RX_FIFO_DEPTH; sc->sc_txfifosz = SFUART_TX_FIFO_DEPTH; sc->sc_hwiflow = 0; sc->sc_hwoflow = 0; device_set_desc(sc->sc_dev, "SiFive UART"); return (0); } static int sfuart_bus_attach(struct uart_softc *sc) { struct uart_bas *bas; struct sfuart_softc *sfsc; uint64_t freq; uint32_t reg; int error; sfsc = (struct sfuart_softc *)sc; bas = &sc->sc_bas; error = clk_get_by_ofw_index(sc->sc_dev, 0, 0, &sfsc->clk); if (error) { device_printf(sc->sc_dev, "couldn't allocate clock\n"); return (ENXIO); } error = clk_enable(sfsc->clk); if (error) { device_printf(sc->sc_dev, "couldn't enable clock\n"); return (ENXIO); } error = clk_get_freq(sfsc->clk, &freq); if (error || freq == 0) { clk_disable(sfsc->clk); device_printf(sc->sc_dev, "couldn't get clock frequency\n"); return (ENXIO); } bas->rclk = freq; /* Enable RX/RX */ reg = SFUART_RXCTRL_ENABLE; reg |= (0 << SFUART_RXCTRL_RXCNT_SHIFT); uart_setreg(bas, SFUART_RXCTRL, reg); reg = SFUART_TXCTRL_ENABLE; reg |= (1 << SFUART_TXCTRL_TXCNT_SHIFT); uart_setreg(bas, SFUART_TXCTRL, reg); /* Enable RX interrupt */ uart_setreg(bas, SFUART_IRQ_ENABLE, SFUART_IRQ_ENABLE_RXWM); return (0); } static int sfuart_bus_detach(struct uart_softc *sc) { struct sfuart_softc *sfsc; struct uart_bas *bas; sfsc = (struct sfuart_softc *)sc; bas = &sc->sc_bas; /* Disable RX/TX */ uart_setreg(bas, SFUART_RXCTRL, 0); uart_setreg(bas, SFUART_TXCTRL, 0); /* Disable interrupts */ uart_setreg(bas, SFUART_IRQ_ENABLE, 0); clk_disable(sfsc->clk); return (0); } static int 
sfuart_bus_flush(struct uart_softc *sc, int what) { struct uart_bas *bas; uint32_t reg; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); if (what & UART_FLUSH_TRANSMITTER) { do { reg = uart_getreg(bas, SFUART_TXDATA); } while ((reg & SFUART_TXDATA_FULL) != 0); } if (what & UART_FLUSH_RECEIVER) { do { reg = uart_getreg(bas, SFUART_RXDATA); } while ((reg & SFUART_RXDATA_EMPTY) == 0); } uart_unlock(sc->sc_hwmtx); return (0); } #define SIGCHG(c, i, s, d) \ do { \ if (c) \ i |= ((i) & (s)) ? (s) : (s) | (d); \ else \ i = ((i) & (s)) ? ((i) & ~(s)) | (d) : (i); \ } while (0) static int sfuart_bus_getsig(struct uart_softc *sc) { uint32_t new, old, sig; do { old = sc->sc_hwsig; sig = old; SIGCHG(1, sig, SER_DSR, SER_DDSR); SIGCHG(1, sig, SER_DCD, SER_DDCD); SIGCHG(1, sig, SER_CTS, SER_DCTS); new = sig & ~SER_MASK_DELTA; } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); return (sig); } static int sfuart_bus_setsig(struct uart_softc *sc, int sig) { uint32_t new, old; do { old = sc->sc_hwsig; new = old; if (sig & SER_DDTR) { SIGCHG(sig & SER_DTR, new, SER_DTR, SER_DDTR); } if (sig & SER_DRTS) { SIGCHG(sig & SER_RTS, new, SER_RTS, SER_DRTS); } } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); return (0); } static int sfuart_bus_ioctl(struct uart_softc *sc, int request, intptr_t data) { struct uart_bas *bas; uint32_t reg; int error; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); switch (request) { case UART_IOCTL_BAUD: reg = uart_getreg(bas, SFUART_DIV); if (reg == 0) { /* Possible if the divisor hasn't been set up yet. 
*/ error = ENXIO; break; } *(int*)data = bas->rclk / (reg + 1); error = 0; break; default: error = EINVAL; break; } uart_unlock(sc->sc_hwmtx); return (error); } static int sfuart_bus_ipend(struct uart_softc *sc) { struct uart_bas *bas; int ipend; uint32_t reg, ie; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); ipend = 0; reg = uart_getreg(bas, SFUART_IRQ_PENDING); ie = uart_getreg(bas, SFUART_IRQ_ENABLE); if ((reg & SFUART_IRQ_PENDING_TXWM) != 0 && (ie & SFUART_IRQ_ENABLE_TXWM) != 0) { ipend |= SER_INT_TXIDLE; /* Disable TX interrupt */ ie &= ~(SFUART_IRQ_ENABLE_TXWM); uart_setreg(bas, SFUART_IRQ_ENABLE, ie); } if ((reg & SFUART_IRQ_PENDING_RXQM) != 0) ipend |= SER_INT_RXREADY; uart_unlock(sc->sc_hwmtx); return (ipend); } static int sfuart_bus_param(struct uart_softc *sc, int baudrate, int databits, int stopbits, int parity) { struct uart_bas *bas; uint32_t reg; bas = &sc->sc_bas; if (databits != 8) return (EINVAL); if (parity != UART_PARITY_NONE) return (EINVAL); uart_lock(sc->sc_hwmtx); reg = uart_getreg(bas, SFUART_TXCTRL); if (stopbits == 2) { reg |= SFUART_TXCTRL_NSTOP; } else if (stopbits == 1) { reg &= ~SFUART_TXCTRL_NSTOP; } else { uart_unlock(sc->sc_hwmtx); return (EINVAL); } if (baudrate > 0 && bas->rclk != 0) { reg = (bas->rclk / baudrate) - 1; uart_setreg(bas, SFUART_DIV, reg); } uart_unlock(sc->sc_hwmtx); return (0); } static int sfuart_bus_receive(struct uart_softc *sc) { struct uart_bas *bas; uint32_t reg; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); reg = uart_getreg(bas, SFUART_RXDATA); while ((reg & SFUART_RXDATA_EMPTY) == 0) { if (uart_rx_full(sc)) { sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN; break; } uart_rx_put(sc, reg & 0xff); reg = uart_getreg(bas, SFUART_RXDATA); } uart_unlock(sc->sc_hwmtx); return (0); } static int sfuart_bus_transmit(struct uart_softc *sc) { struct uart_bas *bas; int i; uint32_t reg; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); reg = uart_getreg(bas, SFUART_IRQ_ENABLE); reg |= SFUART_IRQ_ENABLE_TXWM; uart_setreg(bas, 
SFUART_IRQ_ENABLE, reg); for (i = 0; i < sc->sc_txdatasz; i++) sfuart_putc(bas, sc->sc_txbuf[i]); sc->sc_txbusy = 1; uart_unlock(sc->sc_hwmtx); return (0); } static void sfuart_bus_grab(struct uart_softc *sc) { struct uart_bas *bas; uint32_t reg; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); reg = uart_getreg(bas, SFUART_IRQ_ENABLE); reg &= ~(SFUART_IRQ_ENABLE_TXWM | SFUART_IRQ_PENDING_RXQM); uart_setreg(bas, SFUART_IRQ_ENABLE, reg); uart_unlock(sc->sc_hwmtx); } static void sfuart_bus_ungrab(struct uart_softc *sc) { struct uart_bas *bas; uint32_t reg; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); reg = uart_getreg(bas, SFUART_IRQ_ENABLE); reg |= SFUART_IRQ_ENABLE_TXWM | SFUART_IRQ_PENDING_RXQM; uart_setreg(bas, SFUART_IRQ_ENABLE, reg); uart_unlock(sc->sc_hwmtx); } static kobj_method_t sfuart_methods[] = { KOBJMETHOD(uart_probe, sfuart_bus_probe), KOBJMETHOD(uart_attach, sfuart_bus_attach), KOBJMETHOD(uart_detach, sfuart_bus_detach), KOBJMETHOD(uart_flush, sfuart_bus_flush), KOBJMETHOD(uart_getsig, sfuart_bus_getsig), KOBJMETHOD(uart_setsig, sfuart_bus_setsig), KOBJMETHOD(uart_ioctl, sfuart_bus_ioctl), KOBJMETHOD(uart_ipend, sfuart_bus_ipend), KOBJMETHOD(uart_param, sfuart_bus_param), KOBJMETHOD(uart_receive, sfuart_bus_receive), KOBJMETHOD(uart_transmit, sfuart_bus_transmit), KOBJMETHOD(uart_grab, sfuart_bus_grab), KOBJMETHOD(uart_ungrab, sfuart_bus_ungrab), KOBJMETHOD_END }; static struct uart_ops sfuart_ops = { .probe = sfuart_probe, .init = sfuart_init, .term = NULL, .putc = sfuart_putc, .rxready = sfuart_rxready, .getc = sfuart_getc, }; struct uart_class sfuart_class = { "sifiveuart", sfuart_methods, sizeof(struct sfuart_softc), .uc_ops = &sfuart_ops, .uc_range = SFUART_REGS_SIZE, .uc_rclk = 0, .uc_rshift = 0 }; static struct ofw_compat_data compat_data[] = { { "sifive,uart0", (uintptr_t)&sfuart_class }, { NULL, (uintptr_t)NULL } }; UART_FDT_CLASS_AND_DEVICE(compat_data);