Index: head/sys/arm/amlogic/aml8726/aml8726_timer.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_timer.c (revision 327431) +++ head/sys/arm/amlogic/aml8726/aml8726_timer.c (revision 327432) @@ -1,394 +1,396 @@ /*- * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Amlogic aml8726 timer driver. * * 16 bit Timer A is used for the event timer / hard clock. * 32 bit Timer E is used for the timecounter / DELAY. * * The current implementation doesn't use Timers B-D. Another approach is * to split the timers between the cores implementing per cpu event timers. * * The timers all share the MUX register which requires a mutex to serialize * access. 
The mutex is also used to avoid potential problems between the * interrupt handler and timer_start / timer_stop. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include struct aml8726_timer_softc { device_t dev; struct resource * res[2]; struct mtx mtx; void * ih_cookie; struct eventtimer et; uint32_t first_ticks; uint32_t period_ticks; struct timecounter tc; }; static struct resource_spec aml8726_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, /* INT_TIMER_A */ { -1, 0 } }; /* * devclass_get_device / device_get_softc could be used * to dynamically locate this, however the timers are a * required device which can't be unloaded so there's * no need for the overhead. */ static struct aml8726_timer_softc *aml8726_timer_sc = NULL; #define AML_TIMER_LOCK(sc) mtx_lock_spin(&(sc)->mtx) #define AML_TIMER_UNLOCK(sc) mtx_unlock_spin(&(sc)->mtx) #define AML_TIMER_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), \ "timer", MTX_SPIN) #define AML_TIMER_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx); #define AML_TIMER_MUX_REG 0 #define AML_TIMER_INPUT_1us 0 #define AML_TIMER_INPUT_10us 1 #define AML_TIMER_INPUT_100us 2 #define AML_TIMER_INPUT_1ms 3 #define AML_TIMER_INPUT_MASK 3 #define AML_TIMER_A_INPUT_MASK 3 #define AML_TIMER_A_INPUT_SHIFT 0 #define AML_TIMER_B_INPUT_MASK (3 << 2) #define AML_TIMER_B_INPUT_SHIFT 2 #define AML_TIMER_C_INPUT_MASK (3 << 4) #define AML_TIMER_C_INPUT_SHIFT 4 #define AML_TIMER_D_INPUT_MASK (3 << 6) #define AML_TIMER_D_INPUT_SHIFT 6 #define AML_TIMER_E_INPUT_SYS 0 #define AML_TIMER_E_INPUT_1us 1 #define AML_TIMER_E_INPUT_10us 2 #define AML_TIMER_E_INPUT_100us 3 #define AML_TIMER_E_INPUT_1ms 4 #define AML_TIMER_E_INPUT_MASK (7 << 8) #define AML_TIMER_E_INPUT_SHIFT 8 #define AML_TIMER_A_PERIODIC (1 << 12) #define AML_TIMER_B_PERIODIC (1 << 13) #define AML_TIMER_C_PERIODIC (1 << 14) #define AML_TIMER_D_PERIODIC 
(1 << 15) #define AML_TIMER_A_EN (1 << 16) #define AML_TIMER_B_EN (1 << 17) #define AML_TIMER_C_EN (1 << 18) #define AML_TIMER_D_EN (1 << 19) #define AML_TIMER_E_EN (1 << 20) #define AML_TIMER_A_REG 4 #define AML_TIMER_B_REG 8 #define AML_TIMER_C_REG 12 #define AML_TIMER_D_REG 16 #define AML_TIMER_E_REG 20 #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) static unsigned aml8726_get_timecount(struct timecounter *tc) { struct aml8726_timer_softc *sc = (struct aml8726_timer_softc *)tc->tc_priv; return CSR_READ_4(sc, AML_TIMER_E_REG); } static int aml8726_hardclock(void *arg) { struct aml8726_timer_softc *sc = (struct aml8726_timer_softc *)arg; AML_TIMER_LOCK(sc); if (sc->first_ticks != 0 && sc->period_ticks != 0) { sc->first_ticks = 0; CSR_WRITE_4(sc, AML_TIMER_A_REG, sc->period_ticks); CSR_WRITE_4(sc, AML_TIMER_MUX_REG, (CSR_READ_4(sc, AML_TIMER_MUX_REG) | AML_TIMER_A_PERIODIC | AML_TIMER_A_EN)); } AML_TIMER_UNLOCK(sc); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int aml8726_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct aml8726_timer_softc *sc = (struct aml8726_timer_softc *)et->et_priv; uint32_t first_ticks; uint32_t period_ticks; uint32_t periodic; uint32_t ticks; first_ticks = (first * et->et_frequency) / SBT_1S; period_ticks = (period * et->et_frequency) / SBT_1S; if (first_ticks != 0) { ticks = first_ticks; periodic = 0; } else { ticks = period_ticks; periodic = AML_TIMER_A_PERIODIC; } if (ticks == 0) return (EINVAL); AML_TIMER_LOCK(sc); sc->first_ticks = first_ticks; sc->period_ticks = period_ticks; CSR_WRITE_4(sc, AML_TIMER_A_REG, ticks); CSR_WRITE_4(sc, AML_TIMER_MUX_REG, ((CSR_READ_4(sc, AML_TIMER_MUX_REG) & ~AML_TIMER_A_PERIODIC) | AML_TIMER_A_EN | periodic)); AML_TIMER_UNLOCK(sc); return (0); } static int aml8726_timer_stop(struct eventtimer *et) { struct aml8726_timer_softc *sc = 
(struct aml8726_timer_softc *)et->et_priv; AML_TIMER_LOCK(sc); CSR_WRITE_4(sc, AML_TIMER_MUX_REG, (CSR_READ_4(sc, AML_TIMER_MUX_REG) & ~AML_TIMER_A_EN)); AML_TIMER_UNLOCK(sc); return (0); } static int aml8726_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "amlogic,meson6-timer")) return (ENXIO); device_set_desc(dev, "Amlogic aml8726 timer"); return (BUS_PROBE_DEFAULT); } static int aml8726_timer_attach(device_t dev) { struct aml8726_timer_softc *sc = device_get_softc(dev); /* There should be exactly one instance. */ if (aml8726_timer_sc != NULL) return (ENXIO); sc->dev = dev; if (bus_alloc_resources(dev, aml8726_timer_spec, sc->res)) { device_printf(dev, "can not allocate resources for device\n"); return (ENXIO); } /* * Disable the timers, select the input for each timer, * clear timer E, and then enable timer E. */ CSR_WRITE_4(sc, AML_TIMER_MUX_REG, ((CSR_READ_4(sc, AML_TIMER_MUX_REG) & ~(AML_TIMER_A_EN | AML_TIMER_A_INPUT_MASK | AML_TIMER_E_EN | AML_TIMER_E_INPUT_MASK)) | (AML_TIMER_INPUT_1us << AML_TIMER_A_INPUT_SHIFT) | (AML_TIMER_E_INPUT_1us << AML_TIMER_E_INPUT_SHIFT))); CSR_WRITE_4(sc, AML_TIMER_E_REG, 0); CSR_WRITE_4(sc, AML_TIMER_MUX_REG, (CSR_READ_4(sc, AML_TIMER_MUX_REG) | AML_TIMER_E_EN)); /* * Initialize the mutex prior to installing the interrupt handler * in case of a spurious interrupt. 
*/ AML_TIMER_LOCK_INIT(sc); if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_CLK, aml8726_hardclock, NULL, sc, &sc->ih_cookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, aml8726_timer_spec, sc->res); AML_TIMER_LOCK_DESTROY(sc); return (ENXIO); } aml8726_timer_sc = sc; sc->et.et_name = "aml8726 timer A"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->et.et_frequency = 1000000; sc->et.et_quality = 1000; sc->et.et_min_period = (0x00000002LLU * SBT_1S) / sc->et.et_frequency; sc->et.et_max_period = (0x0000fffeLLU * SBT_1S) / sc->et.et_frequency; sc->et.et_start = aml8726_timer_start; sc->et.et_stop = aml8726_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); sc->tc.tc_get_timecount = aml8726_get_timecount; sc->tc.tc_name = "aml8726 timer E"; sc->tc.tc_frequency = 1000000; sc->tc.tc_counter_mask = ~0u; sc->tc.tc_quality = 1000; sc->tc.tc_priv = sc; tc_init(&sc->tc); return (0); } static int aml8726_timer_detach(device_t dev) { return (EBUSY); } static device_method_t aml8726_timer_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_timer_probe), DEVMETHOD(device_attach, aml8726_timer_attach), DEVMETHOD(device_detach, aml8726_timer_detach), DEVMETHOD_END }; static driver_t aml8726_timer_driver = { "timer", aml8726_timer_methods, sizeof(struct aml8726_timer_softc), }; static devclass_t aml8726_timer_devclass; EARLY_DRIVER_MODULE(timer, simplebus, aml8726_timer_driver, aml8726_timer_devclass, 0, 0, BUS_PASS_TIMER); void DELAY(int usec) { uint32_t counter; uint32_t delta, now, previous, remaining; /* Timer has not yet been initialized */ if (aml8726_timer_sc == NULL) { for (; usec > 0; usec--) for (counter = 200; counter > 0; counter--) { /* Prevent gcc from optimizing out the loop */ cpufunc_nullop(); } return; } + TSENTER(); /* * Some of the other timers in the source tree do this calculation as: * * usec * ((sc->tc.tc_frequency / 1000000) + 1) * * which gives a fairly pessimistic result when 
tc_frequency is an exact * multiple of 1000000. Given the data type and typical values for * tc_frequency adding 999999 shouldn't overflow. */ remaining = usec * ((aml8726_timer_sc->tc.tc_frequency + 999999) / 1000000); /* * We add one since the first iteration may catch the counter just * as it is changing. */ remaining += 1; previous = aml8726_get_timecount(&aml8726_timer_sc->tc); for ( ; ; ) { now = aml8726_get_timecount(&aml8726_timer_sc->tc); /* * If the timer has rolled over, then we have the case: * * if (previous > now) { * delta = (0 - previous) + now * } * * which is really no different then the normal case. * Both cases are simply: * * delta = now - previous. */ delta = now - previous; if (delta >= remaining) break; previous = now; remaining -= delta; } + TSEXIT(); } Index: head/sys/arm/arm/generic_timer.c =================================================================== --- head/sys/arm/arm/generic_timer.c (revision 327431) +++ head/sys/arm/arm/generic_timer.c (revision 327432) @@ -1,555 +1,557 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2011 The FreeBSD Foundation * Copyright (c) 2013 Ruslan Bukin * All rights reserved. * * Based on mpcore_timer.c developed by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer */ #include "opt_acpi.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__arm__) #include /* For arm_set_delay */ #endif #ifdef FDT #include #include #include #endif #ifdef DEV_ACPI #include #include #endif #define GT_CTRL_ENABLE (1 << 0) #define GT_CTRL_INT_MASK (1 << 1) #define GT_CTRL_INT_STAT (1 << 2) #define GT_REG_CTRL 0 #define GT_REG_TVAL 1 #define GT_CNTKCTL_PL0PTEN (1 << 9) /* PL0 Physical timer reg access */ #define GT_CNTKCTL_PL0VTEN (1 << 8) /* PL0 Virtual timer reg access */ #define GT_CNTKCTL_EVNTI (0xf << 4) /* Virtual counter event bits */ #define GT_CNTKCTL_EVNTDIR (1 << 3) /* Virtual counter event transition */ #define GT_CNTKCTL_EVNTEN (1 << 2) /* Enables virtual counter events */ #define GT_CNTKCTL_PL0VCTEN (1 << 1) /* PL0 CNTVCT and CNTFRQ access */ #define GT_CNTKCTL_PL0PCTEN (1 << 0) /* PL0 CNTPCT and CNTFRQ access */ struct arm_tmr_softc { struct resource *res[4]; void *ihl[4]; uint32_t clkfreq; struct eventtimer et; bool physical; }; static struct arm_tmr_softc 
*arm_tmr_sc = NULL; static struct resource_spec timer_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Secure */ { SYS_RES_IRQ, 1, RF_ACTIVE }, /* Non-secure */ { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL }, /* Virt */ { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL }, /* Hyp */ { -1, 0 } }; static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc); static void arm_tmr_do_delay(int usec, void *); static timecounter_get_t arm_tmr_get_timecount; static struct timecounter arm_tmr_timecount = { .tc_name = "ARM MPCore Timecounter", .tc_get_timecount = arm_tmr_get_timecount, .tc_poll_pps = NULL, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 1000, .tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands, }; #ifdef __arm__ #define get_el0(x) cp15_## x ##_get() #define get_el1(x) cp15_## x ##_get() #define set_el0(x, val) cp15_## x ##_set(val) #define set_el1(x, val) cp15_## x ##_set(val) #else /* __aarch64__ */ #define get_el0(x) READ_SPECIALREG(x ##_el0) #define get_el1(x) READ_SPECIALREG(x ##_el1) #define set_el0(x, val) WRITE_SPECIALREG(x ##_el0, val) #define set_el1(x, val) WRITE_SPECIALREG(x ##_el1, val) #endif static int get_freq(void) { return (get_el0(cntfrq)); } static uint64_t get_cntxct(bool physical) { uint64_t val; isb(); if (physical) val = get_el0(cntpct); else val = get_el0(cntvct); return (val); } static int set_ctrl(uint32_t val, bool physical) { if (physical) set_el0(cntp_ctl, val); else set_el0(cntv_ctl, val); isb(); return (0); } static int set_tval(uint32_t val, bool physical) { if (physical) set_el0(cntp_tval, val); else set_el0(cntv_tval, val); isb(); return (0); } static int get_ctrl(bool physical) { uint32_t val; if (physical) val = get_el0(cntp_ctl); else val = get_el0(cntv_ctl); return (val); } static void setup_user_access(void *arg __unused) { uint32_t cntkctl; cntkctl = get_el1(cntkctl); cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN | GT_CNTKCTL_EVNTEN); if (arm_tmr_sc->physical) { cntkctl |= 
GT_CNTKCTL_PL0PCTEN; cntkctl &= ~GT_CNTKCTL_PL0VCTEN; } else { cntkctl |= GT_CNTKCTL_PL0VCTEN; cntkctl &= ~GT_CNTKCTL_PL0PCTEN; } set_el1(cntkctl, cntkctl); isb(); } static void tmr_setup_user_access(void *arg __unused) { if (arm_tmr_sc != NULL) smp_rendezvous(NULL, setup_user_access, NULL, NULL); } SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_SECOND, tmr_setup_user_access, NULL); static unsigned arm_tmr_get_timecount(struct timecounter *tc) { return (get_cntxct(arm_tmr_sc->physical)); } static int arm_tmr_start(struct eventtimer *et, sbintime_t first, sbintime_t period __unused) { struct arm_tmr_softc *sc; int counts, ctrl; sc = (struct arm_tmr_softc *)et->et_priv; if (first != 0) { counts = ((uint32_t)et->et_frequency * first) >> 32; ctrl = get_ctrl(sc->physical); ctrl &= ~GT_CTRL_INT_MASK; ctrl |= GT_CTRL_ENABLE; set_tval(counts, sc->physical); set_ctrl(ctrl, sc->physical); return (0); } return (EINVAL); } static void arm_tmr_disable(bool physical) { int ctrl; ctrl = get_ctrl(physical); ctrl &= ~GT_CTRL_ENABLE; set_ctrl(ctrl, physical); } static int arm_tmr_stop(struct eventtimer *et) { struct arm_tmr_softc *sc; sc = (struct arm_tmr_softc *)et->et_priv; arm_tmr_disable(sc->physical); return (0); } static int arm_tmr_intr(void *arg) { struct arm_tmr_softc *sc; int ctrl; sc = (struct arm_tmr_softc *)arg; ctrl = get_ctrl(sc->physical); if (ctrl & GT_CTRL_INT_STAT) { ctrl |= GT_CTRL_INT_MASK; set_ctrl(ctrl, sc->physical); } if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } #ifdef FDT static int arm_tmr_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) { device_set_desc(dev, "ARMv7 Generic Timer"); return (BUS_PROBE_DEFAULT); } else if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) { device_set_desc(dev, "ARMv8 Generic Timer"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } #endif #ifdef DEV_ACPI static void arm_tmr_acpi_identify(driver_t *driver, device_t 
parent) { ACPI_TABLE_GTDT *gtdt; vm_paddr_t physaddr; device_t dev; physaddr = acpi_find_table(ACPI_SIG_GTDT); if (physaddr == 0) return; gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT); if (gtdt == NULL) { device_printf(parent, "gic: Unable to map the GTDT\n"); return; } dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE, "generic_timer", -1); if (dev == NULL) { device_printf(parent, "add gic child failed\n"); goto out; } BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 0, gtdt->SecureEl1Interrupt, 1); BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 1, gtdt->NonSecureEl1Interrupt, 1); BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, 2, gtdt->VirtualTimerInterrupt, 1); out: acpi_unmap_table(gtdt); } static int arm_tmr_acpi_probe(device_t dev) { device_set_desc(dev, "ARM Generic Timer"); return (BUS_PROBE_NOWILDCARD); } #endif static int arm_tmr_attach(device_t dev) { struct arm_tmr_softc *sc; #ifdef FDT phandle_t node; pcell_t clock; #endif int error; int i; sc = device_get_softc(dev); if (arm_tmr_sc) return (ENXIO); #ifdef FDT /* Get the base clock frequency */ node = ofw_bus_get_node(dev); if (node > 0) { error = OF_getencprop(node, "clock-frequency", &clock, sizeof(clock)); if (error > 0) sc->clkfreq = clock; } #endif if (sc->clkfreq == 0) { /* Try to get clock frequency from timer */ sc->clkfreq = get_freq(); } if (sc->clkfreq == 0) { device_printf(dev, "No clock frequency specified\n"); return (ENXIO); } if (bus_alloc_resources(dev, timer_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } #ifdef __arm__ sc->physical = true; #else /* __aarch64__ */ /* If we do not have a virtual timer use the physical. */ sc->physical = (sc->res[2] == NULL) ? true : false; #endif arm_tmr_sc = sc; /* Setup secure, non-secure and virtual IRQs handler */ for (i = 0; i < 3; i++) { /* If we do not have the interrupt, skip it. 
*/ if (sc->res[i] == NULL) continue; error = bus_setup_intr(dev, sc->res[i], INTR_TYPE_CLK, arm_tmr_intr, NULL, sc, &sc->ihl[i]); if (error) { device_printf(dev, "Unable to alloc int resource.\n"); return (ENXIO); } } /* Disable the virtual timer until we are ready */ if (sc->res[2] != NULL) arm_tmr_disable(false); /* And the physical */ if (sc->physical) arm_tmr_disable(true); arm_tmr_timecount.tc_frequency = sc->clkfreq; tc_init(&arm_tmr_timecount); sc->et.et_name = "ARM MPCore Eventtimer"; sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 1000; sc->et.et_frequency = sc->clkfreq; sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = arm_tmr_start; sc->et.et_stop = arm_tmr_stop; sc->et.et_priv = sc; et_register(&sc->et); #if defined(__arm__) arm_set_delay(arm_tmr_do_delay, sc); #endif return (0); } #ifdef FDT static device_method_t arm_tmr_fdt_methods[] = { DEVMETHOD(device_probe, arm_tmr_fdt_probe), DEVMETHOD(device_attach, arm_tmr_attach), { 0, 0 } }; static driver_t arm_tmr_fdt_driver = { "generic_timer", arm_tmr_fdt_methods, sizeof(struct arm_tmr_softc), }; static devclass_t arm_tmr_fdt_devclass; EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, arm_tmr_fdt_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, arm_tmr_fdt_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); #endif #ifdef DEV_ACPI static device_method_t arm_tmr_acpi_methods[] = { DEVMETHOD(device_identify, arm_tmr_acpi_identify), DEVMETHOD(device_probe, arm_tmr_acpi_probe), DEVMETHOD(device_attach, arm_tmr_attach), { 0, 0 } }; static driver_t arm_tmr_acpi_driver = { "generic_timer", arm_tmr_acpi_methods, sizeof(struct arm_tmr_softc), }; static devclass_t arm_tmr_acpi_devclass; EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, arm_tmr_acpi_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); 
#endif static void arm_tmr_do_delay(int usec, void *arg) { struct arm_tmr_softc *sc = arg; int32_t counts, counts_per_usec; uint32_t first, last; /* Get the number of times to count */ counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (usec >= (0x80000000U / counts_per_usec)) counts = (0x80000000U / counts_per_usec) - 1; else counts = usec * counts_per_usec; first = get_cntxct(sc->physical); while (counts > 0) { last = get_cntxct(sc->physical); counts -= (int32_t)(last - first); first = last; } } #if defined(__aarch64__) void DELAY(int usec) { int32_t counts; + TSENTER(); /* * Check the timers are setup, if not just * use a for loop for the meantime */ if (arm_tmr_sc == NULL) { for (; usec > 0; usec--) for (counts = 200; counts > 0; counts--) /* * Prevent the compiler from optimizing * out the loop */ cpufunc_nullop(); } else arm_tmr_do_delay(usec, arm_tmr_sc); + TSEXIT(); } #endif static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) { vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM; vdso_th->th_physical = arm_tmr_sc->physical; bzero(vdso_th->th_res, sizeof(vdso_th->th_res)); return (1); } Index: head/sys/arm/arm/machdep.c =================================================================== --- head/sys/arm/arm/machdep.c (revision 327431) +++ head/sys/arm/arm/machdep.c (revision 327432) @@ -1,1270 +1,1272 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. 
* * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Machine dependent functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include "opt_timer.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \ defined(COMPAT_FREEBSD9) #error FreeBSD/arm doesn't provide compatibility with releases prior to 10 #endif #if __ARM_ARCH >= 6 && !defined(INTRNG) #error armv6 requires INTRNG #endif struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT vm_paddr_t pmap_pa; #if __ARM_ARCH >= 6 vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; #else /* * This is the number of L2 page tables required for covering max * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, * stacks etc.), uprounded to be divisible by 4. 
*/ #define KERNEL_PT_MAX 78 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; struct pv_addr systempage; static struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; static struct pv_addr kernelstack; #endif /* __ARM_ARCH >= 6 */ #endif /* FDT */ #ifdef PLATFORM static delay_func *delay_impl; static void *delay_arg; #endif struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. */ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; #if __ARM_ARCH < 6 if (va == ARM_VECTORS_HIGH) { /* * Enable high vectors in the system control reg (SCTLR). * * Assume the MD caller knows what it's doing here, and really * does want the vector page relocated. * * Note: This has to be done here (and not just in * cpu_setup()) because the vector page needs to be * accessible *before* cpu_startup() is called. * Think ddb(9) ... */ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); } #endif } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; #if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE) vm_page_t m; #endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. 
*/ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_cnt.v_free_count), (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); if (bootverbose) { arm_physmem_print_tables(); devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(kernel_pmap, pcb); #if __ARM_ARCH < 6 vector_page_setprot(VM_PROT_READ); pmap_postinit(); #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); #else m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); #endif *(uint32_t *)ARM_RAS_START = 0; *(uint32_t *)ARM_RAS_END = 0xffffffff; #endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len); } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { return (ENXIO); } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); #ifndef NO_EVENTTIMERS if (!busy) cpu_idleclock(); #endif if (!sched_runnable()) cpu_sleep(0); #ifndef NO_EVENTTIMERS if (!busy) cpu_activeclock(); #endif spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } #ifdef NO_EVENTTIMERS /* * Most ARM platforms don't need to do anything special to init their clocks * (they get intialized during normal device attachment), and by not defining a * cpu_initclocks() function they get this generic one. 
Any platform that needs * to do something special can just provide their own implementation, which will * override this one due to the weak linkage. */ void arm_generic_initclocks(void) { } __weak_reference(arm_generic_initclocks, cpu_initclocks); #else void cpu_initclocks(void) { #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif } #endif #ifdef PLATFORM void arm_set_delay(delay_func *impl, void *arg) { KASSERT(impl != NULL, ("No DELAY implementation")); delay_impl = impl; delay_arg = arg; } void DELAY(int usec) { + TSENTER(); delay_impl(usec, delay_arg); + TSEXIT(); } #endif void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; critical_exit(); cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) restore_interrupts(cspr); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } #ifdef VFP /* * Get machine VFP context. */ void get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *pcb; pcb = td->td_pcb; if (td == curthread) { critical_enter(); vfp_store(&pcb->pcb_vfpstate, false); critical_exit(); } else MPASS(TD_IS_SUSPENDED(td)); memcpy(vfp->mcv_reg, pcb->pcb_vfpstate.reg, sizeof(vfp->mcv_reg)); vfp->mcv_fpscr = pcb->pcb_vfpstate.fpscr; } /* * Set machine VFP context. 
*/ void set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *pcb; pcb = td->td_pcb; if (td == curthread) { critical_enter(); vfp_discard(td); critical_exit(); } else MPASS(TD_IS_SUSPENDED(td)); memcpy(pcb->pcb_vfpstate.reg, vfp->mcv_reg, sizeof(pcb->pcb_vfpstate.reg)); pcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr; } #endif int arm_get_vfpstate(struct thread *td, void *args) { int rv; struct arm_get_vfpstate_args ua; mcontext_vfp_t mcontext_vfp; rv = copyin(args, &ua, sizeof(ua)); if (rv != 0) return (rv); if (ua.mc_vfp_size != sizeof(mcontext_vfp_t)) return (EINVAL); #ifdef VFP get_vfpcontext(td, &mcontext_vfp); #else bzero(&mcontext_vfp, sizeof(mcontext_vfp)); #endif rv = copyout(&mcontext_vfp, ua.mc_vfp, sizeof(mcontext_vfp)); if (rv != 0) return (rv); return (0); } /* * Get machine context. */ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; mcp->mc_vfp_size = 0; mcp->mc_vfp_ptr = NULL; memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare)); return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. 
*/ int set_mcontext(struct thread *td, mcontext_t *mcp) { mcontext_vfp_t mc_vfp, *vfp; struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; int spsr; /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = gr[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); #ifdef WITNESS if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) { printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n", td->td_proc->p_comm, __func__, mcp->mc_vfp_size, mcp->mc_vfp_size); } else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) { printf("%s: %s: c_vfp_size != 0 but mc_vfp_ptr == NULL\n", td->td_proc->p_comm, __func__); } #endif if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) { if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0) return (EFAULT); vfp = &mc_vfp; } else { vfp = NULL; } tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; #ifdef VFP if (vfp != NULL) set_vfpcontext(td, vfp); #endif return (0); } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, 
catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); #ifdef VFP get_vfpcontext(td, &frame.sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp; #else frame.sf_uc.uc_mcontext.mc_vfp_size = 0; frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL; #endif frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. 
*/ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* Restore register context. */ error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } void pcpu0_init(void) { #if __ARM_ARCH >= 6 set_curthread(&thread0); #endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } #if __ARM_ARCH >= 6 void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #else void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #endif #ifdef FDT #if __ARM_ARCH < 6 void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pv_addr kernel_l1pt; struct pv_addr dpcpu; vm_offset_t dtbp, freemempos, l2_start, lastaddr; uint64_t memsize; uint32_t l2size; char *env; void *kmdp; u_int l1pagetable; int i, j, err_devmap, mem_regions_sz; lastaddr = 
parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; memsize = 0; cpuinfo_init(); set_cpufuncs(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); else dtbp = (vm_offset_t)NULL; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* Calculate number of L2 tables needed for mapping vm_page_array */ l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); l2size = (l2size >> L1_S_SHIFT) + 1; /* * Add one table for end of kernel map, one for stacks, msgbuf and * L1 and L2 tables map, one for vectors map and two for * l2 structures from pmap_bootstrap. 
*/ l2size += 5; /* Make it divisible by 4 */ l2size = (l2size + 3) & ~3; freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0, j = 0; i < l2size; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); j = i; } else { kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + L2_TABLE_SIZE_REAL * (i - j); kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* * Try to map as much as possible of kernel text and data using * 1MB section mapping and for the rest of initial kernel address * space use L2 coarse tables. 
* * Link L2 tables for mapping remainder of kernel (modulo 1MB) * and kernel structures */ l2_start = lastaddr & ~(L1_S_OFFSET); for (i = 0 ; i < l2size - 1; i++) pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, &kernel_pt_table[i]); pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; /* Map kernel code and data */ pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map L1 directory and allocated L2 page tables */ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, kernel_pt_table[0].pv_pa, L2_TABLE_SIZE_REAL * l2size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map allocated DPCPU, stacks and msgbuf */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, freemempos - dpcpu.pv_va, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Link and map the vector page */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[l2size - 1]); pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); devmap_bootstrap(l1pagetable, NULL); vm_max_kernel_address = platform_lastaddr(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); pmap_pa = kernel_l1pt.pv_pa; cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. 
*/ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); arm_print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void *)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. 
*/ arm_physmem_exclude_region(abp->abp_physaddr, (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); dbg_monitor_init(); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #else /* __ARM_ARCH < 6 */ void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; char *env; void *kmdp; int err_devmap, mem_regions_sz; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #if defined(LINUX_BOOT_ABI) arm_parse_fdt_bootargs(); #endif #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,NULL) != 0) panic("Cannot get physical memory regions"); } arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. 
*/ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * If EARLY_PRINTF support is enabled, we need to re-establish the * mapping after pmap_bootstrap_prepare() switches to new page tables. * Note that we can only do the remapping if the VA is outside the * kernel, now that we have real virtual (not VA=PA) mappings in effect. * Early printf does not work between the time pmap_set_tex() does * cp15_prrr_set() and this code remaps the VA. */ #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024, VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE); #endif /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. */ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU); /* Allocate message buffer. 
*/ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); /* * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(), * undo it now that the normal console printf works. */ #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE pmap_kremove(SOCDEV_VA); #endif debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); arm_print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). 
*/ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); dbg_monitor_init(); kdb_init(); return ((void *)STACKALIGN(thread0.td_pcb)); } #endif /* __ARM_ARCH < 6 */ #endif /* FDT */ Index: head/sys/arm/arm/mpcore_timer.c =================================================================== --- head/sys/arm/arm/mpcore_timer.c (revision 327431) +++ head/sys/arm/arm/mpcore_timer.c (revision 327432) @@ -1,562 +1,564 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Developed by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * The ARM Cortex-A9 core can support a global timer plus a private and * watchdog timer per core. This driver reserves memory and interrupt * resources for accessing both timer register sets, these resources are * stored globally and used to setup the timecount and eventtimer. * * The timecount timer uses the global 64-bit counter, whereas the * per-CPU eventtimer uses the private 32-bit counters. * * * REF: ARM Cortex-A9 MPCore, Technical Reference Manual (rev. 
r2p2) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For arm_set_delay */ #include #include #include #include #include /* Private (per-CPU) timer register map */ #define PRV_TIMER_LOAD 0x0000 #define PRV_TIMER_COUNT 0x0004 #define PRV_TIMER_CTRL 0x0008 #define PRV_TIMER_INTR 0x000C #define PRV_TIMER_CTR_PRESCALER_SHIFT 8 #define PRV_TIMER_CTRL_IRQ_ENABLE (1UL << 2) #define PRV_TIMER_CTRL_AUTO_RELOAD (1UL << 1) #define PRV_TIMER_CTRL_TIMER_ENABLE (1UL << 0) #define PRV_TIMER_INTR_EVENT (1UL << 0) /* Global timer register map */ #define GBL_TIMER_COUNT_LOW 0x0000 #define GBL_TIMER_COUNT_HIGH 0x0004 #define GBL_TIMER_CTRL 0x0008 #define GBL_TIMER_INTR 0x000C #define GBL_TIMER_CTR_PRESCALER_SHIFT 8 #define GBL_TIMER_CTRL_AUTO_INC (1UL << 3) #define GBL_TIMER_CTRL_IRQ_ENABLE (1UL << 2) #define GBL_TIMER_CTRL_COMP_ENABLE (1UL << 1) #define GBL_TIMER_CTRL_TIMER_ENABLE (1UL << 0) #define GBL_TIMER_INTR_EVENT (1UL << 0) struct arm_tmr_softc { device_t dev; int irqrid; int memrid; struct resource * gbl_mem; struct resource * prv_mem; struct resource * prv_irq; uint64_t clkfreq; struct eventtimer et; }; static struct eventtimer *arm_tmr_et; static struct timecounter *arm_tmr_tc; static uint64_t arm_tmr_freq; static boolean_t arm_tmr_freq_varies; #define tmr_prv_read_4(sc, reg) bus_read_4((sc)->prv_mem, reg) #define tmr_prv_write_4(sc, reg, val) bus_write_4((sc)->prv_mem, reg, val) #define tmr_gbl_read_4(sc, reg) bus_read_4((sc)->gbl_mem, reg) #define tmr_gbl_write_4(sc, reg, val) bus_write_4((sc)->gbl_mem, reg, val) static void arm_tmr_delay(int, void *); static timecounter_get_t arm_tmr_get_timecount; static struct timecounter arm_tmr_timecount = { .tc_name = "MPCore", .tc_get_timecount = arm_tmr_get_timecount, .tc_poll_pps = NULL, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 800, }; #define TMR_GBL 0x01 #define TMR_PRV 0x02 #define TMR_BOTH 
(TMR_GBL | TMR_PRV) #define TMR_NONE 0 static struct ofw_compat_data compat_data[] = { {"arm,mpcore-timers", TMR_BOTH}, /* Non-standard, FreeBSD. */ {"arm,cortex-a9-global-timer", TMR_GBL}, {"arm,cortex-a5-global-timer", TMR_GBL}, {"arm,cortex-a9-twd-timer", TMR_PRV}, {"arm,cortex-a5-twd-timer", TMR_PRV}, {"arm,arm11mp-twd-timer", TMR_PRV}, {NULL, TMR_NONE} }; /** * arm_tmr_get_timecount - reads the timecount (global) timer * @tc: pointer to arm_tmr_timecount struct * * We only read the lower 32-bits, the timecount stuff only uses 32-bits * so (for now?) ignore the upper 32-bits. * * RETURNS * The lower 32-bits of the counter. */ static unsigned arm_tmr_get_timecount(struct timecounter *tc) { struct arm_tmr_softc *sc; sc = tc->tc_priv; return (tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW)); } /** * arm_tmr_start - starts the eventtimer (private) timer * @et: pointer to eventtimer struct * @first: the number of seconds and fractional sections to trigger in * @period: the period (in seconds and fractional sections) to set * * If the eventtimer is required to be in oneshot mode, period will be * NULL and first will point to the time to trigger. If in periodic mode * period will contain the time period and first may optionally contain * the time for the first period. 
* * RETURNS * Always returns 0 */ static int arm_tmr_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct arm_tmr_softc *sc; uint32_t load, count; uint32_t ctrl; sc = et->et_priv; tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0); tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); ctrl = PRV_TIMER_CTRL_IRQ_ENABLE | PRV_TIMER_CTRL_TIMER_ENABLE; if (period != 0) { load = ((uint32_t)et->et_frequency * period) >> 32; ctrl |= PRV_TIMER_CTRL_AUTO_RELOAD; } else load = 0; if (first != 0) count = (uint32_t)((et->et_frequency * first) >> 32); else count = load; tmr_prv_write_4(sc, PRV_TIMER_LOAD, load); tmr_prv_write_4(sc, PRV_TIMER_COUNT, count); tmr_prv_write_4(sc, PRV_TIMER_CTRL, ctrl); return (0); } /** * arm_tmr_stop - stops the eventtimer (private) timer * @et: pointer to eventtimer struct * * Simply stops the private timer by clearing all bits in the ctrl register. * * RETURNS * Always returns 0 */ static int arm_tmr_stop(struct eventtimer *et) { struct arm_tmr_softc *sc; sc = et->et_priv; tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0); tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); return (0); } /** * arm_tmr_intr - ISR for the eventtimer (private) timer * @arg: pointer to arm_tmr_softc struct * * Clears the event register and then calls the eventtimer callback. * * RETURNS * Always returns FILTER_HANDLED */ static int arm_tmr_intr(void *arg) { struct arm_tmr_softc *sc; sc = arg; tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } /** * arm_tmr_probe - timer probe routine * @dev: new device * * The probe function returns success when probed with the fdt compatible * string set to "arm,mpcore-timers". * * RETURNS * BUS_PROBE_DEFAULT if the fdt device is compatible, otherwise ENXIO. 
*/ static int arm_tmr_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == TMR_NONE) return (ENXIO); device_set_desc(dev, "ARM MPCore Timers"); return (BUS_PROBE_DEFAULT); } static int attach_tc(struct arm_tmr_softc *sc) { int rid; if (arm_tmr_tc != NULL) return (EBUSY); rid = sc->memrid; sc->gbl_mem = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gbl_mem == NULL) { device_printf(sc->dev, "could not allocate gbl mem resources\n"); return (ENXIO); } tmr_gbl_write_4(sc, GBL_TIMER_CTRL, 0x00000000); arm_tmr_timecount.tc_frequency = sc->clkfreq; arm_tmr_timecount.tc_priv = sc; tc_init(&arm_tmr_timecount); arm_tmr_tc = &arm_tmr_timecount; tmr_gbl_write_4(sc, GBL_TIMER_CTRL, GBL_TIMER_CTRL_TIMER_ENABLE); return (0); } static int attach_et(struct arm_tmr_softc *sc) { void *ihl; int irid, mrid; if (arm_tmr_et != NULL) return (EBUSY); mrid = sc->memrid; sc->prv_mem = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &mrid, RF_ACTIVE); if (sc->prv_mem == NULL) { device_printf(sc->dev, "could not allocate prv mem resources\n"); return (ENXIO); } tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0x00000000); irid = sc->irqrid; sc->prv_irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irid, RF_ACTIVE); if (sc->prv_irq == NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, mrid, sc->prv_mem); device_printf(sc->dev, "could not allocate prv irq resources\n"); return (ENXIO); } if (bus_setup_intr(sc->dev, sc->prv_irq, INTR_TYPE_CLK, arm_tmr_intr, NULL, sc, &ihl) != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, mrid, sc->prv_mem); bus_release_resource(sc->dev, SYS_RES_IRQ, irid, sc->prv_irq); device_printf(sc->dev, "unable to setup the et irq handler.\n"); return (ENXIO); } /* * Setup and register the eventtimer. Most event timers set their min * and max period values to some value calculated from the clock * frequency. 
We might not know yet what our runtime clock frequency * will be, so we just use some safe values. A max of 2 seconds ensures * that even if our base clock frequency is 2GHz (meaning a 4GHz CPU), * we won't overflow our 32-bit timer count register. A min of 20 * nanoseconds is pretty much completely arbitrary. */ sc->et.et_name = "MPCore"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 1000; sc->et.et_frequency = sc->clkfreq; sc->et.et_min_period = nstosbt(20); sc->et.et_max_period = 2 * SBT_1S; sc->et.et_start = arm_tmr_start; sc->et.et_stop = arm_tmr_stop; sc->et.et_priv = sc; et_register(&sc->et); arm_tmr_et = &sc->et; return (0); } /** * arm_tmr_attach - attaches the timer to the simplebus * @dev: new device * * Reserves memory and interrupt resources, stores the softc structure * globally and registers both the timecount and eventtimer objects. * * RETURNS * Zero on success or ENXIO if an error occuried. */ static int arm_tmr_attach(device_t dev) { struct arm_tmr_softc *sc; phandle_t node; pcell_t clock; int et_err, tc_err, tmrtype; sc = device_get_softc(dev); sc->dev = dev; if (arm_tmr_freq_varies) { sc->clkfreq = arm_tmr_freq; } else { if (arm_tmr_freq != 0) { sc->clkfreq = arm_tmr_freq; } else { /* Get the base clock frequency */ node = ofw_bus_get_node(dev); if ((OF_getencprop(node, "clock-frequency", &clock, sizeof(clock))) <= 0) { device_printf(dev, "missing clock-frequency " "attribute in FDT\n"); return (ENXIO); } sc->clkfreq = clock; } } tmrtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; tc_err = ENXIO; et_err = ENXIO; /* * If we're handling the global timer and it is fixed-frequency, set it * up to use as a timecounter. If it's variable frequency it won't work * as a timecounter. We also can't use it for DELAY(), so hopefully the * platform provides its own implementation. If it doesn't, ours will * get used, but since the frequency isn't set, it will only use the * bogus loop counter. 
*/ if (tmrtype & TMR_GBL) { if (!arm_tmr_freq_varies) tc_err = attach_tc(sc); else if (bootverbose) device_printf(sc->dev, "not using variable-frequency device as timecounter"); sc->memrid++; sc->irqrid++; } /* If we are handling the private timer, set it up as an eventtimer. */ if (tmrtype & TMR_PRV) { et_err = attach_et(sc); } /* * If we didn't successfully set up a timecounter or eventtimer then we * didn't actually attach at all, return error. */ if (tc_err != 0 && et_err != 0) { return (ENXIO); } #ifdef PLATFORM /* * We can register as the DELAY() implementation only if we successfully * set up the global timer. */ if (tc_err == 0) arm_set_delay(arm_tmr_delay, sc); #endif return (0); } static device_method_t arm_tmr_methods[] = { DEVMETHOD(device_probe, arm_tmr_probe), DEVMETHOD(device_attach, arm_tmr_attach), { 0, 0 } }; static driver_t arm_tmr_driver = { "mp_tmr", arm_tmr_methods, sizeof(struct arm_tmr_softc), }; static devclass_t arm_tmr_devclass; EARLY_DRIVER_MODULE(mp_tmr, simplebus, arm_tmr_driver, arm_tmr_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(mp_tmr, ofwbus, arm_tmr_driver, arm_tmr_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); /* * Handle a change in clock frequency. The mpcore timer runs at half the CPU * frequency. When the CPU frequency changes due to power-saving or thermal * management, the platform-specific code that causes the frequency change calls * this routine to inform the clock driver, and we in turn inform the event * timer system, which actually updates the value in et->frequency for us and * reschedules the current event(s) in a way that's atomic with respect to * start/stop/intr code that may be running on various CPUs at the time of the * call. * * This routine can also be called by a platform's early init code. If the * value passed is ARM_TMR_FREQUENCY_VARIES, that will cause the attach() code * to register as an eventtimer, but not a timecounter. 
If the value passed in * is any other non-zero value it is used as the fixed frequency for the timer. */ void arm_tmr_change_frequency(uint64_t newfreq) { if (newfreq == ARM_TMR_FREQUENCY_VARIES) { arm_tmr_freq_varies = true; return; } arm_tmr_freq = newfreq; if (arm_tmr_et != NULL) et_change_frequency(arm_tmr_et, newfreq); } static void arm_tmr_delay(int usec, void *arg) { struct arm_tmr_softc *sc = arg; int32_t counts_per_usec; int32_t counts; uint32_t first, last; /* Get the number of times to count */ counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (usec >= (0x80000000U / counts_per_usec)) counts = (0x80000000U / counts_per_usec) - 1; else counts = usec * counts_per_usec; first = tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW); while (counts > 0) { last = tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW); counts -= (int32_t)(last - first); first = last; } } #ifndef PLATFORM /** * DELAY - Delay for at least usec microseconds. * @usec: number of microseconds to delay by * * This function is called all over the kernel and is suppose to provide a * consistent delay. This function may also be called before the console * is setup so no printf's can be called here. 
* * RETURNS: * nothing */ void DELAY(int usec) { struct arm_tmr_softc *sc; int32_t counts; + TSENTER(); /* Check the timers are setup, if not just use a for loop for the meantime */ if (arm_tmr_tc == NULL || arm_tmr_timecount.tc_frequency == 0) { for (; usec > 0; usec--) for (counts = 200; counts > 0; counts--) cpufunc_nullop(); /* Prevent gcc from optimizing * out the loop */ } else { sc = arm_tmr_tc->tc_priv; arm_tmr_delay(usec, sc); } + TSEXIT(); } #endif Index: head/sys/arm/at91/at91_machdep.c =================================================================== --- head/sys/arm/at91/at91_machdep.c (revision 327431) +++ head/sys/arm/at91/at91_machdep.c (revision 327432) @@ -1,687 +1,689 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * machdep.c * * Machine dependent functions for kernel setup * * This file needs a lot of work. * * Created : 17/09/94 */ #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MAXCPU #define MAXCPU 1 #endif /* Page table for mapping proc0 zero page */ #define KERNEL_PT_SYS 0 #define KERNEL_PT_KERN 1 #define KERNEL_PT_KERN_NUM 22 /* L2 table for mapping after kernel */ #define KERNEL_PT_AFKERNEL KERNEL_PT_KERN + KERNEL_PT_KERN_NUM #define KERNEL_PT_AFKERNEL_NUM 5 /* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ #define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; /* Static device mappings. */ const struct devmap_entry at91_devmap[] = { /* * Map the critical on-board devices. The interrupt vector at * 0xffff0000 makes it impossible to map them PA == VA, so we map all * 0xfffxxxxx addresses to 0xdffxxxxx. 
This covers all critical devices * on all members of the AT91SAM9 and AT91RM9200 families. */ { 0xdff00000, 0xfff00000, 0x00100000, }, /* There's a notion that we should do the rest of these lazily. */ /* * We can't just map the OHCI registers VA == PA, because * AT91xx_xxx_BASE belongs to the userland address space. * We could just choose a different virtual address, but a better * solution would probably be to just use pmap_mapdev() to allocate * KVA, as we don't need the OHCI controller before the vm * initialization is done. However, the AT91 resource allocation * system doesn't know how to use pmap_mapdev() yet. * Care must be taken to ensure PA and VM address do not overlap * between entries. */ { /* * Add the ohci controller, and anything else that might be * on this chip select for a VA/PA mapping. */ /* Internal Memory 1MB */ AT91RM92_OHCI_VA_BASE, AT91RM92_OHCI_BASE, 0x00100000, }, { /* CompactFlash controller. Portion of EBI CS4 1MB */ AT91RM92_CF_VA_BASE, AT91RM92_CF_BASE, 0x00100000, }, /* * The next two should be good for the 9260, 9261 and 9G20 since * addresses mapping is the same. */ { /* Internal Memory 1MB */ AT91SAM9G20_OHCI_VA_BASE, AT91SAM9G20_OHCI_BASE, 0x00100000, }, { /* EBI CS3 256MB */ AT91SAM9G20_NAND_VA_BASE, AT91SAM9G20_NAND_BASE, AT91SAM9G20_NAND_SIZE, }, /* * The next should be good for the 9G45. */ { /* Internal Memory 1MB */ AT91SAM9G45_OHCI_VA_BASE, AT91SAM9G45_OHCI_BASE, 0x00100000, }, { 0, 0, 0, } }; #ifdef LINUX_BOOT_ABI static int membanks; static int memsize[]; #endif long at91_ramsize(void) { uint32_t cr, mdr, mr, *SDRAMC; int banks, rows, cols, bw; #ifdef LINUX_BOOT_ABI /* * If we found any ATAGs that were for memory, return the first bank. */ if (membanks > 0) return (memsize[0]); #endif if (at91_is_rm92()) { SDRAMC = (uint32_t *)(AT91_BASE + AT91RM92_SDRAMC_BASE); cr = SDRAMC[AT91RM92_SDRAMC_CR / 4]; mr = SDRAMC[AT91RM92_SDRAMC_MR / 4]; banks = (cr & AT91RM92_SDRAMC_CR_NB_4) ? 
2 : 1; rows = ((cr & AT91RM92_SDRAMC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91RM92_SDRAMC_CR_NC_MASK) + 8; bw = (mr & AT91RM92_SDRAMC_MR_DBW_16) ? 1 : 2; } else if (at91_cpu_is(AT91_T_SAM9G45)) { SDRAMC = (uint32_t *)(AT91_BASE + AT91SAM9G45_DDRSDRC0_BASE); cr = SDRAMC[AT91SAM9G45_DDRSDRC_CR / 4]; mdr = SDRAMC[AT91SAM9G45_DDRSDRC_MDR / 4]; banks = 0; rows = ((cr & AT91SAM9G45_DDRSDRC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91SAM9G45_DDRSDRC_CR_NC_MASK) + 8; bw = (mdr & AT91SAM9G45_DDRSDRC_MDR_DBW_16) ? 1 : 2; /* Fix the calculation for DDR memory */ mdr &= AT91SAM9G45_DDRSDRC_MDR_MASK; if (mdr & AT91SAM9G45_DDRSDRC_MDR_LPDDR1 || mdr & AT91SAM9G45_DDRSDRC_MDR_DDR2) { /* The cols value is 1 higher for DDR */ cols += 1; /* DDR has 4 internal banks. */ banks = 2; } } else { /* * This should be good for the 9260, 9261, 9G20, 9G35 and 9X25 * as addresses and registers are the same. */ SDRAMC = (uint32_t *)(AT91_BASE + AT91SAM9G20_SDRAMC_BASE); cr = SDRAMC[AT91SAM9G20_SDRAMC_CR / 4]; mr = SDRAMC[AT91SAM9G20_SDRAMC_MR / 4]; banks = (cr & AT91SAM9G20_SDRAMC_CR_NB_4) ? 2 : 1; rows = ((cr & AT91SAM9G20_SDRAMC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91SAM9G20_SDRAMC_CR_NC_MASK) + 8; bw = (cr & AT91SAM9G20_SDRAMC_CR_DBW_16) ? 
1 : 2; } return (1 << (cols + rows + banks + bw)); } static const char *soc_type_name[] = { [AT91_T_CAP9] = "at91cap9", [AT91_T_RM9200] = "at91rm9200", [AT91_T_SAM9260] = "at91sam9260", [AT91_T_SAM9261] = "at91sam9261", [AT91_T_SAM9263] = "at91sam9263", [AT91_T_SAM9G10] = "at91sam9g10", [AT91_T_SAM9G20] = "at91sam9g20", [AT91_T_SAM9G45] = "at91sam9g45", [AT91_T_SAM9N12] = "at91sam9n12", [AT91_T_SAM9RL] = "at91sam9rl", [AT91_T_SAM9X5] = "at91sam9x5", [AT91_T_NONE] = "UNKNOWN" }; static const char *soc_subtype_name[] = { [AT91_ST_NONE] = "UNKNOWN", [AT91_ST_RM9200_BGA] = "at91rm9200_bga", [AT91_ST_RM9200_PQFP] = "at91rm9200_pqfp", [AT91_ST_SAM9XE] = "at91sam9xe", [AT91_ST_SAM9G45] = "at91sam9g45", [AT91_ST_SAM9M10] = "at91sam9m10", [AT91_ST_SAM9G46] = "at91sam9g46", [AT91_ST_SAM9M11] = "at91sam9m11", [AT91_ST_SAM9G15] = "at91sam9g15", [AT91_ST_SAM9G25] = "at91sam9g25", [AT91_ST_SAM9G35] = "at91sam9g35", [AT91_ST_SAM9X25] = "at91sam9x25", [AT91_ST_SAM9X35] = "at91sam9x35", }; struct at91_soc_info soc_info; /* * Read the SoC ID from the CIDR register and try to match it against the * values we know. If we find a good one, we return true. If not, we * return false. When we find a good one, we also find the subtype * and CPU family. 
*/ static int at91_try_id(uint32_t dbgu_base) { uint32_t socid; soc_info.cidr = *(volatile uint32_t *)(AT91_BASE + dbgu_base + DBGU_C1R); socid = soc_info.cidr & ~AT91_CPU_VERSION_MASK; soc_info.type = AT91_T_NONE; soc_info.subtype = AT91_ST_NONE; soc_info.family = (soc_info.cidr & AT91_CPU_FAMILY_MASK) >> 20; soc_info.exid = *(volatile uint32_t *)(AT91_BASE + dbgu_base + DBGU_C2R); switch (socid) { case AT91_CPU_CAP9: soc_info.type = AT91_T_CAP9; break; case AT91_CPU_RM9200: soc_info.type = AT91_T_RM9200; break; case AT91_CPU_SAM9XE128: case AT91_CPU_SAM9XE256: case AT91_CPU_SAM9XE512: case AT91_CPU_SAM9260: soc_info.type = AT91_T_SAM9260; if (soc_info.family == AT91_FAMILY_SAM9XE) soc_info.subtype = AT91_ST_SAM9XE; break; case AT91_CPU_SAM9261: soc_info.type = AT91_T_SAM9261; break; case AT91_CPU_SAM9263: soc_info.type = AT91_T_SAM9263; break; case AT91_CPU_SAM9G10: soc_info.type = AT91_T_SAM9G10; break; case AT91_CPU_SAM9G20: soc_info.type = AT91_T_SAM9G20; break; case AT91_CPU_SAM9G45: soc_info.type = AT91_T_SAM9G45; break; case AT91_CPU_SAM9N12: soc_info.type = AT91_T_SAM9N12; break; case AT91_CPU_SAM9RL64: soc_info.type = AT91_T_SAM9RL; break; case AT91_CPU_SAM9X5: soc_info.type = AT91_T_SAM9X5; break; default: return (0); } switch (soc_info.type) { case AT91_T_SAM9G45: switch (soc_info.exid) { case AT91_EXID_SAM9G45: soc_info.subtype = AT91_ST_SAM9G45; break; case AT91_EXID_SAM9G46: soc_info.subtype = AT91_ST_SAM9G46; break; case AT91_EXID_SAM9M10: soc_info.subtype = AT91_ST_SAM9M10; break; case AT91_EXID_SAM9M11: soc_info.subtype = AT91_ST_SAM9M11; break; } break; case AT91_T_SAM9X5: switch (soc_info.exid) { case AT91_EXID_SAM9G15: soc_info.subtype = AT91_ST_SAM9G15; break; case AT91_EXID_SAM9G25: soc_info.subtype = AT91_ST_SAM9G25; break; case AT91_EXID_SAM9G35: soc_info.subtype = AT91_ST_SAM9G35; break; case AT91_EXID_SAM9X25: soc_info.subtype = AT91_ST_SAM9X25; break; case AT91_EXID_SAM9X35: soc_info.subtype = AT91_ST_SAM9X35; break; } break; default: 
break; } /* * Disable interrupts in the DBGU unit... */ *(volatile uint32_t *)(AT91_BASE + dbgu_base + USART_IDR) = 0xffffffff; /* * Save the name for later... */ snprintf(soc_info.name, sizeof(soc_info.name), "%s%s%s", soc_type_name[soc_info.type], soc_info.subtype == AT91_ST_NONE ? "" : " subtype ", soc_info.subtype == AT91_ST_NONE ? "" : soc_subtype_name[soc_info.subtype]); /* * try to get the matching CPU support. */ soc_info.soc_data = at91_match_soc(soc_info.type, soc_info.subtype); soc_info.dbgu_base = AT91_BASE + dbgu_base; return (1); } void at91_soc_id(void) { if (!at91_try_id(AT91_DBGU0)) at91_try_id(AT91_DBGU1); } #ifdef ARM_MANY_BOARD /* likely belongs in arm/arm/machdep.c, but since board_init is still at91 only... */ SET_DECLARE(arm_board_set, const struct arm_board); /* Not yet fully functional, but enough to build ATMEL config */ static long board_init(void) { return -1; } #endif #ifndef FDT /* Physical and virtual addresses for some global pages */ struct pv_addr msgbufpv; struct pv_addr kernelstack; struct pv_addr systempage; struct pv_addr irqstack; struct pv_addr abtstack; struct pv_addr undstack; void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; struct pv_addr dpcpu; int i; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t afterkern; uint32_t memsize; vm_offset_t lastaddr; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / 
PAGE_SIZE); for (i = 0; i < NUM_KERNEL_PTS; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[i].pv_va = freemempos - (i % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[KERNEL_PT_SYS]); for (i = 0; i < KERNEL_PT_KERN_NUM; i++) pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_KERN + i]); pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, rounddown2(((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE, PAGE_SIZE), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); afterkern = round_page(rounddown2(lastaddr + L1_S_SIZE, L1_S_SIZE)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } /* Map the vector page. 
*/ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the DPCPU pages */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, DPCPU_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the stack pages */ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa, IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa, ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa, UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa, kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa, msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); for (i = 0; i < NUM_KERNEL_PTS; ++i) { pmap_map_chunk(l1pagetable, kernel_pt_table[i].pv_va, kernel_pt_table[i].pv_pa, L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); } devmap_bootstrap(l1pagetable, at91_devmap); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); at91_soc_id(); /* * Initialize all the clocks, so that the console can work. We can only * do this if at91_soc_id() was able to fill in the support data. Even * if we can't init the clocks, still try to do a console init so we can * try to print the error message about missing soc support. There's a * chance the printf will work if the bootloader set up the DBGU. 
*/ if (soc_info.soc_data != NULL) { soc_info.soc_data->soc_clock_init(); at91_pmc_init_clock(); } cninit(); if (soc_info.soc_data == NULL) printf("Warning: No soc support for %s found.\n", soc_info.name); memsize = board_init(); if (memsize == -1) { printf("board_init() failed, cannot determine ram size; " "assuming 16MB\n"); memsize = 16 * 1024 * 1024; } /* Enable MMU (set SCTLR), and do other cpu-specific setup. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); cpu_setup(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_curmaxkvaddr = afterkern + L1_S_SIZE * (KERNEL_PT_KERN_NUM - 1); /* Always use the 256MB of KVA we have available between the kernel and devices */ vm_max_kernel_address = KERNVIRTADDR + (256 << 20); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void*)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Add the physical ram we have available. * * Exclude the kernel, and all the things we allocated which immediately * follow the kernel, from the VM allocation pool but not from crash * dumps. 
virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_hardware_region(PHYSADDR, memsize); arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #endif /* * These functions are handled elsewhere, so make them nops here. */ void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } void cpu_initclocks(void) { } void DELAY(int n) { + TSENTER(); if (soc_info.soc_data) soc_info.soc_data->soc_delay(n); + TSEXIT(); } void cpu_reset(void) { if (soc_info.soc_data) soc_info.soc_data->soc_reset(); while (1) continue; } Index: head/sys/arm/cavium/cns11xx/timer.c =================================================================== --- head/sys/arm/cavium/cns11xx/timer.c (revision 327431) +++ head/sys/arm/cavium/cns11xx/timer.c (revision 327432) @@ -1,381 +1,382 @@ /*- * Copyright (c) 2009 Yohanes Nugroho . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "econa_reg.h" #include "econa_var.h" #define INITIAL_TIMECOUNTER (0xffffffff) static int timers_initialized = 0; #define HZ 100 extern unsigned int CPU_clock; extern unsigned int AHB_clock; extern unsigned int APB_clock; static unsigned long timer_counter = 0; struct ec_timer_softc { struct resource * timer_res[3]; bus_space_tag_t timer_bst; bus_space_handle_t timer_bsh; struct mtx timer_mtx; }; static struct resource_spec ec_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0 } }; static unsigned ec_timer_get_timecount(struct timecounter *); static struct timecounter ec_timecounter = { .tc_get_timecount = ec_timer_get_timecount, .tc_name = "CPU Timer", /* This is assigned on the fly in the init sequence */ .tc_frequency = 0, .tc_counter_mask = ~0u, .tc_quality = 1000, }; static struct ec_timer_softc *timer_softc = NULL; static inline void write_4(unsigned int val, unsigned int addr) { bus_space_write_4(timer_softc->timer_bst, timer_softc->timer_bsh, addr, val); } static inline unsigned int read_4(unsigned int addr) { return bus_space_read_4(timer_softc->timer_bst, timer_softc->timer_bsh, addr); } #define uSECS_PER_TICK (1000000 / APB_clock) #define TICKS2USECS(x) ((x) * uSECS_PER_TICK) static unsigned read_timer_counter_noint(void) { 
arm_mask_irq(0); unsigned int v = read_4(TIMER_TM1_COUNTER_REG); arm_unmask_irq(0); return v; } void DELAY(int usec) { uint32_t val, val_temp; int nticks; if (!timers_initialized) { for (; usec > 0; usec--) for (val = 100; val > 0; val--) ; return; } + TSENTER(); val = read_timer_counter_noint(); nticks = (((APB_clock / 1000) * usec) / 1000) + 100; while (nticks > 0) { val_temp = read_timer_counter_noint(); if (val > val_temp) nticks -= (val - val_temp); else nticks -= (val + (timer_counter - val_temp)); val = val_temp; } - + TSEXIT(); } /* * Setup timer */ static inline void setup_timer(unsigned int counter_value) { unsigned int control_value; unsigned int mask_value; control_value = read_4(TIMER_TM_CR_REG); mask_value = read_4(TIMER_TM_INTR_MASK_REG); write_4(counter_value, TIMER_TM1_COUNTER_REG); write_4(counter_value, TIMER_TM1_LOAD_REG); write_4(0, TIMER_TM1_MATCH1_REG); write_4(0,TIMER_TM1_MATCH2_REG); control_value &= ~(TIMER1_CLOCK_SOURCE); control_value |= TIMER1_UP_DOWN_COUNT; write_4(0, TIMER_TM2_COUNTER_REG); write_4(0, TIMER_TM2_LOAD_REG); write_4(~0u, TIMER_TM2_MATCH1_REG); write_4(~0u,TIMER_TM2_MATCH2_REG); control_value &= ~(TIMER2_CLOCK_SOURCE); control_value &= ~(TIMER2_UP_DOWN_COUNT); mask_value &= ~(63); write_4(control_value, TIMER_TM_CR_REG); write_4(mask_value, TIMER_TM_INTR_MASK_REG); } /* * Enable timer */ static inline void timer_enable(void) { unsigned int control_value; control_value = read_4(TIMER_TM_CR_REG); control_value |= TIMER1_OVERFLOW_ENABLE; control_value |= TIMER1_ENABLE; control_value |= TIMER2_OVERFLOW_ENABLE; control_value |= TIMER2_ENABLE; write_4(control_value, TIMER_TM_CR_REG); } static inline unsigned int read_second_timer_counter(void) { return read_4(TIMER_TM2_COUNTER_REG); } /* * Get timer interrupt status */ static inline unsigned int read_timer_interrupt_status(void) { return read_4(TIMER_TM_INTR_STATUS_REG); } /* * Clear timer interrupt status */ static inline void clear_timer_interrupt_status(unsigned int irq) { 
unsigned int interrupt_status; interrupt_status = read_4(TIMER_TM_INTR_STATUS_REG); if (irq == 0) { if (interrupt_status & (TIMER1_MATCH1_INTR)) interrupt_status &= ~(TIMER1_MATCH1_INTR); if (interrupt_status & (TIMER1_MATCH2_INTR)) interrupt_status &= ~(TIMER1_MATCH2_INTR); if (interrupt_status & (TIMER1_OVERFLOW_INTR)) interrupt_status &= ~(TIMER1_OVERFLOW_INTR); } if (irq == 1) { if (interrupt_status & (TIMER2_MATCH1_INTR)) interrupt_status &= ~(TIMER2_MATCH1_INTR); if (interrupt_status & (TIMER2_MATCH2_INTR)) interrupt_status &= ~(TIMER2_MATCH2_INTR); if (interrupt_status & (TIMER2_OVERFLOW_INTR)) interrupt_status &= ~(TIMER2_OVERFLOW_INTR); } write_4(interrupt_status, TIMER_TM_INTR_STATUS_REG); } static unsigned ec_timer_get_timecount(struct timecounter *a) { unsigned int ticks1; arm_mask_irq(1); ticks1 = read_second_timer_counter(); arm_unmask_irq(1); return ticks1; } /* * Setup timer */ static inline void do_setup_timer(void) { timer_counter = APB_clock/HZ; /* * setup timer-related values */ setup_timer(timer_counter); } void cpu_initclocks(void) { ec_timecounter.tc_frequency = APB_clock; tc_init(&ec_timecounter); timer_enable(); timers_initialized = 1; } void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } static int ec_timer_probe(device_t dev) { device_set_desc(dev, "Econa CPU Timer"); return (0); } static int ec_reset(void *arg) { arm_mask_irq(1); clear_timer_interrupt_status(1); arm_unmask_irq(1); return (FILTER_HANDLED); } static int ec_hardclock(void *arg) { struct trapframe *frame; unsigned int val; /*clear timer interrupt status*/ arm_mask_irq(0); val = read_4(TIMER_INTERRUPT_STATUS_REG); val &= ~(TIMER1_OVERFLOW_INTERRUPT); write_4(val, TIMER_INTERRUPT_STATUS_REG); frame = (struct trapframe *)arg; hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); arm_unmask_irq(0); return (FILTER_HANDLED); } static int ec_timer_attach(device_t dev) { struct ec_timer_softc *sc; int error; void *ihl; if (timer_softc != NULL) return (ENXIO); sc = 
(struct ec_timer_softc *)device_get_softc(dev); timer_softc = sc; error = bus_alloc_resources(dev, ec_timer_spec, sc->timer_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->timer_bst = rman_get_bustag(sc->timer_res[0]); sc->timer_bsh = rman_get_bushandle(sc->timer_res[0]); do_setup_timer(); if (bus_setup_intr(dev, sc->timer_res[1], INTR_TYPE_CLK, ec_hardclock, NULL, NULL, &ihl) != 0) { bus_release_resources(dev, ec_timer_spec, sc->timer_res); device_printf(dev, "could not setup hardclock interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->timer_res[2], INTR_TYPE_CLK, ec_reset, NULL, NULL, &ihl) != 0) { bus_release_resources(dev, ec_timer_spec, sc->timer_res); device_printf(dev, "could not setup timer interrupt\n"); return (ENXIO); } return (0); } static device_method_t ec_timer_methods[] = { DEVMETHOD(device_probe, ec_timer_probe), DEVMETHOD(device_attach, ec_timer_attach), { 0, 0 } }; static driver_t ec_timer_driver = { "timer", ec_timer_methods, sizeof(struct ec_timer_softc), }; static devclass_t ec_timer_devclass; DRIVER_MODULE(timer, econaarm, ec_timer_driver, ec_timer_devclass, 0, 0); Index: head/sys/arm/lpc/lpc_timer.c =================================================================== --- head/sys/arm/lpc/lpc_timer.c (revision 327431) +++ head/sys/arm/lpc/lpc_timer.c (revision 327432) @@ -1,307 +1,309 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct lpc_timer_softc { device_t lt_dev; struct eventtimer lt_et; struct resource * lt_res[5]; bus_space_tag_t lt_bst0; bus_space_handle_t lt_bsh0; bus_space_tag_t lt_bst1; bus_space_handle_t lt_bsh1; int lt_oneshot; uint32_t lt_period; }; static struct resource_spec lpc_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0 } }; static struct lpc_timer_softc *timer_softc = NULL; static int lpc_timer_initialized = 0; static int lpc_timer_probe(device_t); static int lpc_timer_attach(device_t); static int lpc_timer_start(struct eventtimer *, sbintime_t first, sbintime_t period); static int lpc_timer_stop(struct eventtimer *et); static unsigned lpc_get_timecount(struct timecounter *); static int lpc_hardclock(void *); #define 
timer0_read_4(sc, reg) \ bus_space_read_4(sc->lt_bst0, sc->lt_bsh0, reg) #define timer0_write_4(sc, reg, val) \ bus_space_write_4(sc->lt_bst0, sc->lt_bsh0, reg, val) #define timer0_clear(sc) \ do { \ timer0_write_4(sc, LPC_TIMER_TC, 0); \ timer0_write_4(sc, LPC_TIMER_PR, 0); \ timer0_write_4(sc, LPC_TIMER_PC, 0); \ } while(0) #define timer1_read_4(sc, reg) \ bus_space_read_4(sc->lt_bst1, sc->lt_bsh1, reg) #define timer1_write_4(sc, reg, val) \ bus_space_write_4(sc->lt_bst1, sc->lt_bsh1, reg, val) #define timer1_clear(sc) \ do { \ timer1_write_4(sc, LPC_TIMER_TC, 0); \ timer1_write_4(sc, LPC_TIMER_PR, 0); \ timer1_write_4(sc, LPC_TIMER_PC, 0); \ } while(0) static struct timecounter lpc_timecounter = { .tc_get_timecount = lpc_get_timecount, .tc_name = "LPC32x0 Timer1", .tc_frequency = 0, /* will be filled later */ .tc_counter_mask = ~0u, .tc_quality = 1000, }; static int lpc_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,timer")) return (ENXIO); device_set_desc(dev, "LPC32x0 timer"); return (BUS_PROBE_DEFAULT); } static int lpc_timer_attach(device_t dev) { void *intrcookie; struct lpc_timer_softc *sc = device_get_softc(dev); phandle_t node; uint32_t freq; if (timer_softc) return (ENXIO); timer_softc = sc; if (bus_alloc_resources(dev, lpc_timer_spec, sc->lt_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->lt_bst0 = rman_get_bustag(sc->lt_res[0]); sc->lt_bsh0 = rman_get_bushandle(sc->lt_res[0]); sc->lt_bst1 = rman_get_bustag(sc->lt_res[1]); sc->lt_bsh1 = rman_get_bushandle(sc->lt_res[1]); if (bus_setup_intr(dev, sc->lt_res[2], INTR_TYPE_CLK, lpc_hardclock, NULL, sc, &intrcookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, lpc_timer_spec, sc->lt_res); return (ENXIO); } /* Enable timer clock */ lpc_pwr_write(dev, LPC_CLKPWR_TIMCLK_CTRL1, LPC_CLKPWR_TIMCLK_CTRL1_TIMER0 | LPC_CLKPWR_TIMCLK_CTRL1_TIMER1); /* Get PERIPH_CLK encoded 
in parent bus 'bus-frequency' property */ node = ofw_bus_get_node(dev); if (OF_getencprop(OF_parent(node), "bus-frequency", &freq, sizeof(pcell_t)) <= 0) { bus_release_resources(dev, lpc_timer_spec, sc->lt_res); bus_teardown_intr(dev, sc->lt_res[2], intrcookie); device_printf(dev, "could not obtain base clock frequency\n"); return (ENXIO); } /* Set desired frequency in event timer and timecounter */ sc->lt_et.et_frequency = (uint64_t)freq; lpc_timecounter.tc_frequency = (uint64_t)freq; sc->lt_et.et_name = "LPC32x0 Timer0"; sc->lt_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->lt_et.et_quality = 1000; sc->lt_et.et_min_period = (0x00000002LLU << 32) / sc->lt_et.et_frequency; sc->lt_et.et_max_period = (0xfffffffeLLU << 32) / sc->lt_et.et_frequency; sc->lt_et.et_start = lpc_timer_start; sc->lt_et.et_stop = lpc_timer_stop; sc->lt_et.et_priv = sc; et_register(&sc->lt_et); tc_init(&lpc_timecounter); /* Reset and enable timecounter */ timer1_write_4(sc, LPC_TIMER_TCR, LPC_TIMER_TCR_RESET); timer1_write_4(sc, LPC_TIMER_TCR, 0); timer1_clear(sc); timer1_write_4(sc, LPC_TIMER_TCR, LPC_TIMER_TCR_ENABLE); /* DELAY() now can work properly */ lpc_timer_initialized = 1; return (0); } static int lpc_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct lpc_timer_softc *sc = (struct lpc_timer_softc *)et->et_priv; uint32_t ticks; if (period == 0) { sc->lt_oneshot = 1; sc->lt_period = 0; } else { sc->lt_oneshot = 0; sc->lt_period = ((uint32_t)et->et_frequency * period) >> 32; } if (first == 0) ticks = sc->lt_period; else ticks = ((uint32_t)et->et_frequency * first) >> 32; /* Reset timer */ timer0_write_4(sc, LPC_TIMER_TCR, LPC_TIMER_TCR_RESET); timer0_write_4(sc, LPC_TIMER_TCR, 0); /* Start timer */ timer0_clear(sc); timer0_write_4(sc, LPC_TIMER_MR0, ticks); timer0_write_4(sc, LPC_TIMER_MCR, LPC_TIMER_MCR_MR0I | LPC_TIMER_MCR_MR0S); timer0_write_4(sc, LPC_TIMER_TCR, LPC_TIMER_TCR_ENABLE); return (0); } static int lpc_timer_stop(struct eventtimer 
*et) { struct lpc_timer_softc *sc = (struct lpc_timer_softc *)et->et_priv; timer0_write_4(sc, LPC_TIMER_TCR, 0); return (0); } static device_method_t lpc_timer_methods[] = { DEVMETHOD(device_probe, lpc_timer_probe), DEVMETHOD(device_attach, lpc_timer_attach), { 0, 0 } }; static driver_t lpc_timer_driver = { "timer", lpc_timer_methods, sizeof(struct lpc_timer_softc), }; static devclass_t lpc_timer_devclass; DRIVER_MODULE(timer, simplebus, lpc_timer_driver, lpc_timer_devclass, 0, 0); static int lpc_hardclock(void *arg) { struct lpc_timer_softc *sc = (struct lpc_timer_softc *)arg; /* Reset pending interrupt */ timer0_write_4(sc, LPC_TIMER_IR, 0xffffffff); /* Start timer again */ if (!sc->lt_oneshot) { timer0_clear(sc); timer0_write_4(sc, LPC_TIMER_MR0, sc->lt_period); timer0_write_4(sc, LPC_TIMER_TCR, LPC_TIMER_TCR_ENABLE); } if (sc->lt_et.et_active) sc->lt_et.et_event_cb(&sc->lt_et, sc->lt_et.et_arg); return (FILTER_HANDLED); } static unsigned lpc_get_timecount(struct timecounter *tc) { return timer1_read_4(timer_softc, LPC_TIMER_TC); } void DELAY(int usec) { uint32_t counter; uint32_t first, last; int val = (lpc_timecounter.tc_frequency / 1000000 + 1) * usec; /* Timer is not initialized yet */ if (!lpc_timer_initialized) { for (; usec > 0; usec--) for (counter = 100; counter > 0; counter--) ; return; } + TSENTER(); first = lpc_get_timecount(&lpc_timecounter); while (val > 0) { last = lpc_get_timecount(&lpc_timecounter); if (last < first) { /* Timer rolled over */ last = first; } val -= (last - first); first = last; } + TSEXIT(); } Index: head/sys/arm/mv/timer.c =================================================================== --- head/sys/arm/mv/timer.c (revision 327431) +++ head/sys/arm/mv/timer.c (revision 327432) @@ -1,498 +1,500 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Benno Rice. * Copyright (C) 2007-2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Adapted to Marvell SoC by Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_timer.c, rev 1 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define INITIAL_TIMECOUNTER (0xffffffff) #define MAX_WATCHDOG_TICKS (0xffffffff) #define MV_TMR 0x1 #define MV_WDT 0x2 #define MV_NONE 0x0 #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) #define MV_CLOCK_SRC 25000000 /* Timers' 25MHz mode */ #else #define MV_CLOCK_SRC get_tclk() #endif #if defined(SOC_MV_ARMADA38X) #define WATCHDOG_TIMER 4 #else #define WATCHDOG_TIMER 2 #endif struct mv_timer_softc { struct resource * timer_res[2]; bus_space_tag_t timer_bst; bus_space_handle_t timer_bsh; struct mtx timer_mtx; struct eventtimer et; boolean_t has_wdt; }; static struct resource_spec mv_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; /* Interrupt is not required by MV_WDT devices */ static struct ofw_compat_data mv_timer_compat[] = { {"mrvl,timer", MV_TMR | MV_WDT }, {"marvell,armada-380-wdt", MV_WDT }, {NULL, MV_NONE } }; static struct mv_timer_softc *timer_softc = NULL; static int timers_initialized = 0; static int mv_timer_probe(device_t); static int mv_timer_attach(device_t); static int mv_hardclock(void *); static unsigned mv_timer_get_timecount(struct timecounter *); static uint32_t mv_get_timer_control(void); static void mv_set_timer_control(uint32_t); static uint32_t mv_get_timer(uint32_t); static void mv_set_timer(uint32_t, uint32_t); static void mv_set_timer_rel(uint32_t, uint32_t); static void mv_watchdog_enable(void); static void mv_watchdog_disable(void); static void mv_watchdog_event(void *, unsigned int, int *); static int mv_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period); static int mv_timer_stop(struct eventtimer *et); static void mv_setup_timers(void); static struct timecounter 
mv_timer_timecounter = { .tc_get_timecount = mv_timer_get_timecount, .tc_name = "CPUTimer1", .tc_frequency = 0, /* This is assigned on the fly in the init sequence */ .tc_counter_mask = ~0u, .tc_quality = 1000, }; static int mv_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, mv_timer_compat)->ocd_data == MV_NONE) return (ENXIO); device_set_desc(dev, "Marvell CPU Timer"); return (0); } static int mv_timer_attach(device_t dev) { int error; void *ihl; struct mv_timer_softc *sc; #if !defined(SOC_MV_ARMADAXP) && !defined(SOC_MV_ARMADA38X) uint32_t irq_cause, irq_mask; #endif if (timer_softc != NULL) return (ENXIO); sc = (struct mv_timer_softc *)device_get_softc(dev); timer_softc = sc; error = bus_alloc_resources(dev, mv_timer_spec, sc->timer_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->timer_bst = rman_get_bustag(sc->timer_res[0]); sc->timer_bsh = rman_get_bushandle(sc->timer_res[0]); sc->has_wdt = ofw_bus_has_prop(dev, "mrvl,has-wdt") || ofw_bus_is_compatible(dev, "marvell,armada-380-wdt"); mtx_init(&timer_softc->timer_mtx, "watchdog", NULL, MTX_DEF); if (sc->has_wdt) { mv_watchdog_disable(); EVENTHANDLER_REGISTER(watchdog_list, mv_watchdog_event, sc, 0); } if (ofw_bus_search_compatible(dev, mv_timer_compat)->ocd_data == MV_WDT) { /* Don't set timers for wdt-only entry. 
*/ device_printf(dev, "only watchdog attached\n"); return (0); } else if (sc->timer_res[1] == NULL) { device_printf(dev, "no interrupt resource\n"); bus_release_resources(dev, mv_timer_spec, sc->timer_res); return (ENXIO); } if (bus_setup_intr(dev, sc->timer_res[1], INTR_TYPE_CLK, mv_hardclock, NULL, sc, &ihl) != 0) { bus_release_resources(dev, mv_timer_spec, sc->timer_res); device_printf(dev, "Could not setup interrupt.\n"); return (ENXIO); } mv_setup_timers(); #if !defined(SOC_MV_ARMADAXP) && !defined(SOC_MV_ARMADA38X) irq_cause = read_cpu_ctrl(BRIDGE_IRQ_CAUSE); irq_cause &= IRQ_TIMER0_CLR; write_cpu_ctrl(BRIDGE_IRQ_CAUSE, irq_cause); irq_mask = read_cpu_ctrl(BRIDGE_IRQ_MASK); irq_mask |= IRQ_TIMER0_MASK; irq_mask &= ~IRQ_TIMER1_MASK; write_cpu_ctrl(BRIDGE_IRQ_MASK, irq_mask); #endif sc->et.et_name = "CPUTimer0"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->et.et_quality = 1000; sc->et.et_frequency = MV_CLOCK_SRC; sc->et.et_min_period = (0x00000002LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = mv_timer_start; sc->et.et_stop = mv_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); mv_timer_timecounter.tc_frequency = MV_CLOCK_SRC; tc_init(&mv_timer_timecounter); return (0); } static int mv_hardclock(void *arg) { struct mv_timer_softc *sc; uint32_t irq_cause; irq_cause = read_cpu_ctrl(BRIDGE_IRQ_CAUSE); irq_cause &= IRQ_TIMER0_CLR; write_cpu_ctrl(BRIDGE_IRQ_CAUSE, irq_cause); sc = (struct mv_timer_softc *)arg; if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static device_method_t mv_timer_methods[] = { DEVMETHOD(device_probe, mv_timer_probe), DEVMETHOD(device_attach, mv_timer_attach), { 0, 0 } }; static driver_t mv_timer_driver = { "timer", mv_timer_methods, sizeof(struct mv_timer_softc), }; static devclass_t mv_timer_devclass; DRIVER_MODULE(timer, simplebus, mv_timer_driver, mv_timer_devclass, 0, 0); static unsigned 
mv_timer_get_timecount(struct timecounter *tc) { return (INITIAL_TIMECOUNTER - mv_get_timer(1)); } void DELAY(int usec) { uint32_t val, val_temp; int32_t nticks; if (!timers_initialized) { for (; usec > 0; usec--) for (val = 100; val > 0; val--) __asm __volatile("nop" ::: "memory"); return; } + TSENTER(); val = mv_get_timer(1); nticks = ((MV_CLOCK_SRC / 1000000 + 1) * usec); while (nticks > 0) { val_temp = mv_get_timer(1); if (val > val_temp) nticks -= (val - val_temp); else nticks -= (val + (INITIAL_TIMECOUNTER - val_temp)); val = val_temp; } + TSEXIT(); } static uint32_t mv_get_timer_control(void) { return (bus_space_read_4(timer_softc->timer_bst, timer_softc->timer_bsh, CPU_TIMER_CONTROL)); } static void mv_set_timer_control(uint32_t val) { bus_space_write_4(timer_softc->timer_bst, timer_softc->timer_bsh, CPU_TIMER_CONTROL, val); } static uint32_t mv_get_timer(uint32_t timer) { return (bus_space_read_4(timer_softc->timer_bst, timer_softc->timer_bsh, CPU_TIMER0 + timer * 0x8)); } static void mv_set_timer(uint32_t timer, uint32_t val) { bus_space_write_4(timer_softc->timer_bst, timer_softc->timer_bsh, CPU_TIMER0 + timer * 0x8, val); } static void mv_set_timer_rel(uint32_t timer, uint32_t val) { bus_space_write_4(timer_softc->timer_bst, timer_softc->timer_bsh, CPU_TIMER0_REL + timer * 0x8, val); } static void mv_watchdog_enable(void) { uint32_t val, irq_cause; #if !defined(SOC_MV_ARMADAXP) && !defined(SOC_MV_ARMADA38X) uint32_t irq_mask; #endif irq_cause = read_cpu_ctrl(BRIDGE_IRQ_CAUSE); irq_cause &= IRQ_TIMER_WD_CLR; write_cpu_ctrl(BRIDGE_IRQ_CAUSE, irq_cause); #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) val = read_cpu_mp_clocks(WD_RSTOUTn_MASK); val |= (WD_GLOBAL_MASK | WD_CPU0_MASK); write_cpu_mp_clocks(WD_RSTOUTn_MASK, val); val = read_cpu_misc(RSTOUTn_MASK); val &= ~RSTOUTn_MASK_WD; write_cpu_misc(RSTOUTn_MASK, val); #else irq_mask = read_cpu_ctrl(BRIDGE_IRQ_MASK); irq_mask |= IRQ_TIMER_WD_MASK; write_cpu_ctrl(BRIDGE_IRQ_MASK, irq_mask); val = 
read_cpu_ctrl(RSTOUTn_MASK); val |= WD_RST_OUT_EN; write_cpu_ctrl(RSTOUTn_MASK, val); #endif val = mv_get_timer_control(); #if defined(SOC_MV_ARMADA38X) val |= CPU_TIMER_WD_EN | CPU_TIMER_WD_AUTO | CPU_TIMER_WD_25MHZ_EN; #elif defined(SOC_MV_ARMADAXP) val |= CPU_TIMER2_EN | CPU_TIMER2_AUTO | CPU_TIMER_WD_25MHZ_EN; #else val |= CPU_TIMER2_EN | CPU_TIMER2_AUTO; #endif mv_set_timer_control(val); } static void mv_watchdog_disable(void) { uint32_t val, irq_cause; #if !defined(SOC_MV_ARMADAXP) && !defined(SOC_MV_ARMADA38X) uint32_t irq_mask; #endif val = mv_get_timer_control(); #if defined(SOC_MV_ARMADA38X) val &= ~(CPU_TIMER_WD_EN | CPU_TIMER_WD_AUTO); #else val &= ~(CPU_TIMER2_EN | CPU_TIMER2_AUTO); #endif mv_set_timer_control(val); #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) val = read_cpu_mp_clocks(WD_RSTOUTn_MASK); val &= ~(WD_GLOBAL_MASK | WD_CPU0_MASK); write_cpu_mp_clocks(WD_RSTOUTn_MASK, val); val = read_cpu_misc(RSTOUTn_MASK); val |= RSTOUTn_MASK_WD; write_cpu_misc(RSTOUTn_MASK, RSTOUTn_MASK_WD); #else val = read_cpu_ctrl(RSTOUTn_MASK); val &= ~WD_RST_OUT_EN; write_cpu_ctrl(RSTOUTn_MASK, val); irq_mask = read_cpu_ctrl(BRIDGE_IRQ_MASK); irq_mask &= ~(IRQ_TIMER_WD_MASK); write_cpu_ctrl(BRIDGE_IRQ_MASK, irq_mask); #endif irq_cause = read_cpu_ctrl(BRIDGE_IRQ_CAUSE); irq_cause &= IRQ_TIMER_WD_CLR; write_cpu_ctrl(BRIDGE_IRQ_CAUSE, irq_cause); } /* * Watchdog event handler. 
*/ static void mv_watchdog_event(void *arg, unsigned int cmd, int *error) { uint64_t ns; uint64_t ticks; mtx_lock(&timer_softc->timer_mtx); if (cmd == 0) mv_watchdog_disable(); else { /* * Watchdog timeout is in nanosecs, calculation according to * watchdog(9) */ ns = (uint64_t)1 << (cmd & WD_INTERVAL); ticks = (uint64_t)(ns * MV_CLOCK_SRC) / 1000000000; if (ticks > MAX_WATCHDOG_TICKS) mv_watchdog_disable(); else { mv_set_timer(WATCHDOG_TIMER, ticks); mv_watchdog_enable(); *error = 0; } } mtx_unlock(&timer_softc->timer_mtx); } static int mv_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct mv_timer_softc *sc; uint32_t val, val1; /* Calculate dividers. */ sc = (struct mv_timer_softc *)et->et_priv; if (period != 0) val = ((uint32_t)sc->et.et_frequency * period) >> 32; else val = 0; if (first != 0) val1 = ((uint32_t)sc->et.et_frequency * first) >> 32; else val1 = val; /* Apply configuration. */ mv_set_timer_rel(0, val); mv_set_timer(0, val1); val = mv_get_timer_control(); val |= CPU_TIMER0_EN; if (period != 0) val |= CPU_TIMER0_AUTO; else val &= ~CPU_TIMER0_AUTO; mv_set_timer_control(val); return (0); } static int mv_timer_stop(struct eventtimer *et) { uint32_t val; val = mv_get_timer_control(); val &= ~(CPU_TIMER0_EN | CPU_TIMER0_AUTO); mv_set_timer_control(val); return (0); } static void mv_setup_timers(void) { uint32_t val; mv_set_timer_rel(1, INITIAL_TIMECOUNTER); mv_set_timer(1, INITIAL_TIMECOUNTER); val = mv_get_timer_control(); val &= ~(CPU_TIMER0_EN | CPU_TIMER0_AUTO); val |= CPU_TIMER1_EN | CPU_TIMER1_AUTO; #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) /* Enable 25MHz mode */ val |= CPU_TIMER0_25MHZ_EN | CPU_TIMER1_25MHZ_EN; #endif mv_set_timer_control(val); timers_initialized = 1; } Index: head/sys/arm/ralink/rt1310_timer.c =================================================================== --- head/sys/arm/ralink/rt1310_timer.c (revision 327431) +++ head/sys/arm/ralink/rt1310_timer.c (revision 327432) @@ -1,341 
+1,343 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * Copyright (c) 2015 Hiroki Mori * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct rt1310_timer_softc { device_t lt_dev; struct eventtimer lt_et; struct resource * lt_res[8]; bus_space_tag_t lt_bst0; bus_space_handle_t lt_bsh0; bus_space_tag_t lt_bst1; bus_space_handle_t lt_bsh1; bus_space_tag_t lt_bst2; bus_space_handle_t lt_bsh2; bus_space_tag_t lt_bst3; bus_space_handle_t lt_bsh3; int lt_oneshot; uint32_t lt_period; }; static struct resource_spec rt1310_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE }, { SYS_RES_MEMORY, 3, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { SYS_RES_IRQ, 2, RF_ACTIVE }, { -1, 0 } }; static struct rt1310_timer_softc *timer_softc = NULL; static int rt1310_timer_initialized = 0; static int rt1310_timer_probe(device_t); static int rt1310_timer_attach(device_t); static int rt1310_timer_start(struct eventtimer *, sbintime_t first, sbintime_t period); static int rt1310_timer_stop(struct eventtimer *et); static unsigned rt1310_get_timecount(struct timecounter *); static int rt1310_hardclock(void *); #define timer0_read_4(sc, reg) \ bus_space_read_4(sc->lt_bst0, sc->lt_bsh0, reg) #define timer0_write_4(sc, reg, val) \ bus_space_write_4(sc->lt_bst0, sc->lt_bsh0, reg, val) #define timer0_clear(sc) \ do { \ timer0_write_4(sc, RT_TIMER_LOAD, 0); \ timer0_write_4(sc, RT_TIMER_VALUE, 0); \ } while(0) #define timer1_read_4(sc, reg) \ bus_space_read_4(sc->lt_bst1, sc->lt_bsh1, reg) #define timer1_write_4(sc, reg, val) \ bus_space_write_4(sc->lt_bst1, sc->lt_bsh1, reg, val) #define timer1_clear(sc) \ do { \ timer1_write_4(sc, RT_TIMER_LOAD, 0); \ timer1_write_4(sc, RT_TIMER_VALUE, 0); \ } while(0) #define timer2_read_4(sc, reg) \ bus_space_read_4(sc->lt_bst1, sc->lt_bsh2, reg) #define timer2_write_4(sc, reg, val) \ 
bus_space_write_4(sc->lt_bst2, sc->lt_bsh2, reg, val) #define timer3_write_4(sc, reg, val) \ bus_space_write_4(sc->lt_bst3, sc->lt_bsh3, reg, val) static struct timecounter rt1310_timecounter = { .tc_get_timecount = rt1310_get_timecount, .tc_name = "RT1310ATimer1", .tc_frequency = 0, /* will be filled later */ .tc_counter_mask = ~0u, .tc_quality = 1000, }; static int rt1310_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "rt,timer")) return (ENXIO); device_set_desc(dev, "RT1310 timer"); return (BUS_PROBE_DEFAULT); } static int rt1310_timer_attach(device_t dev) { void *intrcookie; struct rt1310_timer_softc *sc = device_get_softc(dev); phandle_t node; uint32_t freq; if (timer_softc) return (ENXIO); timer_softc = sc; if (bus_alloc_resources(dev, rt1310_timer_spec, sc->lt_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->lt_bst0 = rman_get_bustag(sc->lt_res[0]); sc->lt_bsh0 = rman_get_bushandle(sc->lt_res[0]); sc->lt_bst1 = rman_get_bustag(sc->lt_res[1]); sc->lt_bsh1 = rman_get_bushandle(sc->lt_res[1]); sc->lt_bst2 = rman_get_bustag(sc->lt_res[2]); sc->lt_bsh2 = rman_get_bushandle(sc->lt_res[2]); sc->lt_bst3 = rman_get_bustag(sc->lt_res[3]); sc->lt_bsh3 = rman_get_bushandle(sc->lt_res[3]); /* Timer2 interrupt */ if (bus_setup_intr(dev, sc->lt_res[6], INTR_TYPE_CLK, rt1310_hardclock, NULL, sc, &intrcookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, rt1310_timer_spec, sc->lt_res); return (ENXIO); } /* Enable timer clock */ /* rt1310_pwr_write(dev, LPC_CLKPWR_TIMCLK_CTRL1, LPC_CLKPWR_TIMCLK_CTRL1_TIMER0 | LPC_CLKPWR_TIMCLK_CTRL1_TIMER1); */ /* Get PERIPH_CLK encoded in parent bus 'bus-frequency' property */ node = ofw_bus_get_node(dev); if (OF_getprop(OF_parent(node), "bus-frequency", &freq, sizeof(pcell_t)) <= 0) { bus_release_resources(dev, rt1310_timer_spec, sc->lt_res); bus_teardown_intr(dev, sc->lt_res[2], intrcookie); 
device_printf(dev, "could not obtain base clock frequency\n"); return (ENXIO); } freq = fdt32_to_cpu(freq); /* Set desired frequency in event timer and timecounter */ sc->lt_et.et_frequency = (uint64_t)freq; rt1310_timecounter.tc_frequency = (uint64_t)freq; sc->lt_et.et_name = "RT1310ATimer2"; sc->lt_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->lt_et.et_quality = 1000; sc->lt_et.et_min_period = (0x00000002LLU << 32) / sc->lt_et.et_frequency; sc->lt_et.et_max_period = (0xfffffffeLLU << 32) / sc->lt_et.et_frequency; sc->lt_et.et_start = rt1310_timer_start; sc->lt_et.et_stop = rt1310_timer_stop; sc->lt_et.et_priv = sc; et_register(&sc->lt_et); tc_init(&rt1310_timecounter); /* Reset and enable timecounter */ timer0_write_4(sc, RT_TIMER_CONTROL, 0); timer1_write_4(sc, RT_TIMER_CONTROL, 0); timer2_write_4(sc, RT_TIMER_CONTROL, 0); timer3_write_4(sc, RT_TIMER_CONTROL, 0); timer1_write_4(sc, RT_TIMER_LOAD, ~0); timer1_write_4(sc, RT_TIMER_VALUE, ~0); timer1_write_4(sc, RT_TIMER_CONTROL, RT_TIMER_CTRL_ENABLE | RT_TIMER_CTRL_PERIODCAL); /* DELAY() now can work properly */ rt1310_timer_initialized = 1; return (0); } static int rt1310_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct rt1310_timer_softc *sc = (struct rt1310_timer_softc *)et->et_priv; uint32_t ticks; if (period == 0) { sc->lt_oneshot = 1; sc->lt_period = 0; } else { sc->lt_oneshot = 0; sc->lt_period = ((uint32_t)et->et_frequency * period) >> 32; } if (first == 0) ticks = sc->lt_period; else ticks = ((uint32_t)et->et_frequency * first) >> 32; /* Reset timer */ timer2_write_4(sc, RT_TIMER_CONTROL, 0); /* Start timer */ timer2_write_4(sc, RT_TIMER_LOAD, ticks); timer2_write_4(sc, RT_TIMER_VALUE, ticks); timer2_write_4(sc, RT_TIMER_CONTROL, RT_TIMER_CTRL_ENABLE | RT_TIMER_CTRL_INTCTL); return (0); } static int rt1310_timer_stop(struct eventtimer *et) { struct rt1310_timer_softc *sc = (struct rt1310_timer_softc *)et->et_priv; timer2_write_4(sc, RT_TIMER_CONTROL, 0); return 
(0); } static device_method_t rt1310_timer_methods[] = { DEVMETHOD(device_probe, rt1310_timer_probe), DEVMETHOD(device_attach, rt1310_timer_attach), { 0, 0 } }; static driver_t rt1310_timer_driver = { "timer", rt1310_timer_methods, sizeof(struct rt1310_timer_softc), }; static devclass_t rt1310_timer_devclass; EARLY_DRIVER_MODULE(timer, simplebus, rt1310_timer_driver, rt1310_timer_devclass, 0, 0, BUS_PASS_TIMER); static int rt1310_hardclock(void *arg) { struct rt1310_timer_softc *sc = (struct rt1310_timer_softc *)arg; /* Reset pending interrupt */ timer2_write_4(sc, RT_TIMER_CONTROL, timer2_read_4(sc, RT_TIMER_CONTROL) | 0x08); timer2_write_4(sc, RT_TIMER_CONTROL, timer2_read_4(sc, RT_TIMER_CONTROL) & 0x1fb); /* Start timer again */ if (!sc->lt_oneshot) { timer2_write_4(sc, RT_TIMER_LOAD, sc->lt_period); timer2_write_4(sc, RT_TIMER_VALUE, sc->lt_period); timer2_write_4(sc, RT_TIMER_CONTROL, RT_TIMER_CTRL_ENABLE | RT_TIMER_CTRL_INTCTL); } if (sc->lt_et.et_active) sc->lt_et.et_event_cb(&sc->lt_et, sc->lt_et.et_arg); return (FILTER_HANDLED); } static unsigned rt1310_get_timecount(struct timecounter *tc) { return ~timer1_read_4(timer_softc, RT_TIMER_VALUE); } void DELAY(int usec) { uint32_t counter; uint32_t first, last; int val = (rt1310_timecounter.tc_frequency / 1000000 + 1) * usec; /* Timer is not initialized yet */ if (!rt1310_timer_initialized) { for (; usec > 0; usec--) for (counter = 100; counter > 0; counter--) ; return; } + TSENTER(); first = rt1310_get_timecount(&rt1310_timecounter); while (val > 0) { last = rt1310_get_timecount(&rt1310_timecounter); if (last < first) { /* Timer rolled over */ last = first; } val -= (last - first); first = last; } + TSEXIT(); } Index: head/sys/arm/xscale/i8134x/i80321_timer.c =================================================================== --- head/sys/arm/xscale/i8134x/i80321_timer.c (revision 327431) +++ head/sys/arm/xscale/i8134x/i80321_timer.c (revision 327432) @@ -1,484 +1,486 @@ /* $NetBSD: i80321_timer.c,v 1.7 
2003/07/27 04:52:28 thorpej Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Timer/clock support for the Intel i80321 I/O processor. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_XSCALE_81342 #define ICU_INT_TIMER0 (8) /* XXX: Can't include i81342reg.h because definitions overrides the ones from i80321reg.h */ #endif #include "opt_timer.h" void (*i80321_hardclock_hook)(void) = NULL; struct i80321_timer_softc { device_t dev; } timer_softc; static unsigned i80321_timer_get_timecount(struct timecounter *tc); static uint32_t counts_per_hz; #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) static uint32_t offset; static uint32_t last = -1; #endif static int ticked = 0; #ifndef COUNTS_PER_SEC #define COUNTS_PER_SEC 200000000 /* 200MHz */ #endif #define COUNTS_PER_USEC (COUNTS_PER_SEC / 1000000) static struct timecounter i80321_timer_timecounter = { i80321_timer_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ ~0u, /* counter_mask */ #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) COUNTS_PER_SEC, #else COUNTS_PER_SEC * 3, /* frequency */ #endif "i80321 timer", /* name */ 1000 /* quality */ }; static int i80321_timer_probe(device_t dev) { device_set_desc(dev, "i80321 timer"); return (0); } static int i80321_timer_attach(device_t dev) { timer_softc.dev = dev; return (0); } static device_method_t i80321_timer_methods[] = { DEVMETHOD(device_probe, i80321_timer_probe), DEVMETHOD(device_attach, i80321_timer_attach), {0, 0}, }; static driver_t i80321_timer_driver = { "itimer", i80321_timer_methods, sizeof(struct i80321_timer_softc), }; static devclass_t i80321_timer_devclass; DRIVER_MODULE(itimer, iq, i80321_timer_driver, i80321_timer_devclass, 0, 0); int clockhandler(void *); static __inline uint32_t tmr1_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c1, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c1, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tmr1_write(uint32_t 
val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c1, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c1, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tcr1_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c3, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c3, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tcr1_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c3, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c3, c1, 0" #endif : : "r" (val)); } static __inline void trr1_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c5, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c5, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tmr0_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c0, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c0, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tmr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c0, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c0, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tcr0_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c2, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c2, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tcr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c2, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c2, c1, 0" #endif : : "r" (val)); } static __inline void trr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c4, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c4, c1, 0" #endif : : "r" (val)); } static __inline void tisr_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c6, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c6, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tisr_read(void) { int ret; #ifdef 
CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c6, c9, 0" : "=r" (ret)); #else __asm __volatile("mrc p6, 0, %0, c6, c1, 0" : "=r" (ret)); #endif return (ret); } static unsigned i80321_timer_get_timecount(struct timecounter *tc) { #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) uint32_t cur = tcr0_read(); if (cur > last && last != -1) { offset += counts_per_hz; if (ticked > 0) ticked--; } if (ticked) { offset += ticked * counts_per_hz; ticked = 0; } return (counts_per_hz - cur + offset); #else uint32_t ret; __asm __volatile("mrc p14, 0, %0, c1, c0, 0\n" : "=r" (ret)); return (ret); #endif } /* * i80321_calibrate_delay: * * Calibrate the delay loop. */ void i80321_calibrate_delay(void) { /* * Just use hz=100 for now -- we'll adjust it, if necessary, * in cpu_initclocks(). */ counts_per_hz = COUNTS_PER_SEC / 100; tmr0_write(0); /* stop timer */ tisr_write(TISR_TMR0); /* clear interrupt */ trr0_write(counts_per_hz); /* reload value */ tcr0_write(counts_per_hz); /* current value */ tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); } /* * cpu_initclocks: * * Initialize the clock and get them going. */ void cpu_initclocks(void) { u_int oldirqstate; struct resource *irq; int rid = 0; void *ihl; device_t dev = timer_softc.dev; if (hz < 50 || COUNTS_PER_SEC % hz) { printf("Cannot get %d Hz clock; using 100 Hz\n", hz); hz = 100; } tick = 1000000 / hz; /* number of microseconds between interrupts */ /* * We only have one timer available; stathz and profhz are * always left as 0 (the upper-layer clock code deals with * this situation). */ if (stathz != 0) printf("Cannot get %d Hz statclock\n", stathz); stathz = 0; if (profhz != 0) printf("Cannot get %d Hz profclock\n", profhz); profhz = 0; /* Report the clock frequency. 
*/ oldirqstate = disable_interrupts(PSR_I); irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, #ifdef CPU_XSCALE_81342 ICU_INT_TIMER0, ICU_INT_TIMER0, #else ICU_INT_TMR0, ICU_INT_TMR0, #endif 1, RF_ACTIVE); if (!irq) panic("Unable to setup the clock irq handler.\n"); else bus_setup_intr(dev, irq, INTR_TYPE_CLK, clockhandler, NULL, NULL, &ihl); tmr0_write(0); /* stop timer */ tisr_write(TISR_TMR0); /* clear interrupt */ counts_per_hz = COUNTS_PER_SEC / hz; trr0_write(counts_per_hz); /* reload value */ tcr0_write(counts_per_hz); /* current value */ tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); tc_init(&i80321_timer_timecounter); restore_interrupts(oldirqstate); rid = 0; #if !defined(XSCALE_DISABLE_CCNT) && !defined(CPU_XSCALE_81342) /* Enable the clock count register. */ __asm __volatile("mrc p14, 0, %0, c0, c0, 0\n" : "=r" (rid)); rid &= ~(1 << 3); rid |= (1 << 2) | 1; __asm __volatile("mcr p14, 0, %0, c0, c0, 0\n" : : "r" (rid)); #endif } /* * DELAY: * * Delay for at least N microseconds. */ void DELAY(int n) { uint32_t cur, last, delta, usecs; + TSENTER(); /* * This works by polling the timer and counting the * number of microseconds that go by. */ last = tcr0_read(); delta = usecs = 0; while (n > usecs) { cur = tcr0_read(); /* Check to see if the timer has wrapped around. */ if (last < cur) delta += (last + (counts_per_hz - cur)); else delta += (last - cur); last = cur; if (delta >= COUNTS_PER_USEC) { usecs += delta / COUNTS_PER_USEC; delta %= COUNTS_PER_USEC; } } + TSEXIT(); } /* * clockhandler: * * Handle the hardclock interrupt. 
*/ int clockhandler(void *arg) { struct trapframe *frame = arg; ticked++; tisr_write(TISR_TMR0); hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); if (i80321_hardclock_hook != NULL) (*i80321_hardclock_hook)(); return (FILTER_HANDLED); } void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } Index: head/sys/arm/xscale/ixp425/ixp425_timer.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_timer.c (revision 327431) +++ head/sys/arm/xscale/ixp425/ixp425_timer.c (revision 327432) @@ -1,264 +1,266 @@ /* $NetBSD: ixp425_timer.c,v 1.15 2009/10/21 14:15:51 rmind Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 2003 * Ichiro FUKUHARA . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static uint32_t counts_per_hz; /* callback functions for intr_functions */ int ixpclk_intr(void *); struct ixpclk_softc { device_t sc_dev; bus_addr_t sc_baseaddr; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; }; static unsigned ixp425_timer_get_timecount(struct timecounter *tc); #ifndef IXP425_CLOCK_FREQ #define COUNTS_PER_SEC 66666600 /* 66MHz */ #else #define COUNTS_PER_SEC IXP425_CLOCK_FREQ #endif #define COUNTS_PER_USEC ((COUNTS_PER_SEC / 1000000) + 1) static struct ixpclk_softc *ixpclk_sc = NULL; #define GET_TS_VALUE(sc) (*(volatile u_int32_t *) \ (IXP425_TIMER_VBASE + IXP425_OST_TS)) static struct timecounter ixp425_timer_timecounter = { ixp425_timer_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ ~0u, /* counter_mask */ COUNTS_PER_SEC, /* frequency */ "IXP4XX Timer", /* name */ 1000, /* quality */ }; static int ixpclk_probe(device_t dev) { device_set_desc(dev, "IXP4XX Timer"); return (0); } static int ixpclk_attach(device_t dev) { struct ixpclk_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); ixpclk_sc = sc; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; sc->sc_baseaddr = IXP425_TIMER_HWBASE; if (bus_space_map(sc->sc_iot, sc->sc_baseaddr, 8, 0, &sc->sc_ioh)) panic("%s: Cannot map 
registers", device_get_name(dev)); return (0); } static device_method_t ixpclk_methods[] = { DEVMETHOD(device_probe, ixpclk_probe), DEVMETHOD(device_attach, ixpclk_attach), {0, 0}, }; static driver_t ixpclk_driver = { "ixpclk", ixpclk_methods, sizeof(struct ixpclk_softc), }; static devclass_t ixpclk_devclass; DRIVER_MODULE(ixpclk, ixp, ixpclk_driver, ixpclk_devclass, 0, 0); static unsigned ixp425_timer_get_timecount(struct timecounter *tc) { uint32_t ret; ret = GET_TS_VALUE(sc); return (ret); } /* * cpu_initclocks: * * Initialize the clock and get them going. */ void cpu_initclocks(void) { struct ixpclk_softc* sc = ixpclk_sc; struct resource *irq; device_t dev = sc->sc_dev; u_int oldirqstate; int rid = 0; void *ihl; if (hz < 50 || COUNTS_PER_SEC % hz) { printf("Cannot get %d Hz clock; using 100 Hz\n", hz); hz = 100; } tick = 1000000 / hz; /* number of microseconds between interrupts */ /* * We only have one timer available; stathz and profhz are * always left as 0 (the upper-layer clock code deals with * this situation). */ if (stathz != 0) printf("Cannot get %d Hz statclock\n", stathz); stathz = 0; if (profhz != 0) printf("Cannot get %d Hz profclock\n", profhz); profhz = 0; /* Report the clock frequency. */ oldirqstate = disable_interrupts(PSR_I); irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, IXP425_INT_TMR0, IXP425_INT_TMR0, 1, RF_ACTIVE); if (!irq) panic("Unable to setup the clock irq handler.\n"); else bus_setup_intr(dev, irq, INTR_TYPE_CLK, ixpclk_intr, NULL, NULL, &ihl); /* Set up the new clock parameters. 
*/ /* clear interrupt */ bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, OST_WARM_RESET | OST_WDOG_INT | OST_TS_INT | OST_TIM1_INT | OST_TIM0_INT); counts_per_hz = COUNTS_PER_SEC / hz; /* reload value & Timer enable */ bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_TIM0_RELOAD, (counts_per_hz & TIMERRELOAD_MASK) | OST_TIMER_EN); tc_init(&ixp425_timer_timecounter); restore_interrupts(oldirqstate); rid = 0; } /* * DELAY: * * Delay for at least N microseconds. */ void DELAY(int n) { u_int32_t first, last; int usecs; if (n == 0) return; + TSENTER(); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (n >= (0x80000000U / COUNTS_PER_USEC)) usecs = (0x80000000U / COUNTS_PER_USEC) - 1; else usecs = n * COUNTS_PER_USEC; /* Note: Timestamp timer counts *up*, unlike the other timers */ first = GET_TS_VALUE(); while (usecs > 0) { last = GET_TS_VALUE(); usecs -= (int)(last - first); first = last; } + TSEXIT(); } /* * ixpclk_intr: * * Handle the hardclock interrupt. */ int ixpclk_intr(void *arg) { struct ixpclk_softc* sc = ixpclk_sc; struct trapframe *frame = arg; bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, OST_TIM0_INT); hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); return (FILTER_HANDLED); } void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } Index: head/sys/arm/xscale/pxa/pxa_timer.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_timer.c (revision 327431) +++ head/sys/arm/xscale/pxa/pxa_timer.c (revision 327432) @@ -1,320 +1,322 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Benno Rice. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PXA_TIMER_FREQUENCY 3686400 #define PXA_TIMER_TICK (PXA_TIMER_FREQUENCY / hz) struct pxa_timer_softc { struct resource * pt_res[5]; bus_space_tag_t pt_bst; bus_space_handle_t pt_bsh; }; static struct resource_spec pxa_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { SYS_RES_IRQ, 2, RF_ACTIVE }, { SYS_RES_IRQ, 3, RF_ACTIVE }, { -1, 0 } }; static struct pxa_timer_softc *timer_softc = NULL; static int pxa_timer_probe(device_t); static int pxa_timer_attach(device_t); static driver_filter_t pxa_hardclock; static unsigned pxa_timer_get_timecount(struct timecounter *); uint32_t pxa_timer_get_osmr(int); void pxa_timer_set_osmr(int, uint32_t); uint32_t pxa_timer_get_oscr(void); void pxa_timer_set_oscr(uint32_t); uint32_t pxa_timer_get_ossr(void); void pxa_timer_clear_ossr(uint32_t); void pxa_timer_watchdog_enable(void); void pxa_timer_watchdog_disable(void); void pxa_timer_interrupt_enable(int); void pxa_timer_interrupt_disable(int); static struct timecounter pxa_timer_timecounter = { .tc_get_timecount = pxa_timer_get_timecount, .tc_name = "OS Timer", .tc_frequency = PXA_TIMER_FREQUENCY, .tc_counter_mask = ~0u, .tc_quality = 1000, }; static int pxa_timer_probe(device_t dev) { device_set_desc(dev, "OS Timer"); return (0); } static int pxa_timer_attach(device_t dev) { int error; void *ihl; struct pxa_timer_softc *sc; sc = (struct pxa_timer_softc *)device_get_softc(dev); if (timer_softc != NULL) return (ENXIO); error = bus_alloc_resources(dev, pxa_timer_spec, sc->pt_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->pt_bst = rman_get_bustag(sc->pt_res[0]); sc->pt_bsh = rman_get_bushandle(sc->pt_res[0]); timer_softc = sc; pxa_timer_interrupt_disable(-1); pxa_timer_watchdog_disable(); if 
(bus_setup_intr(dev, sc->pt_res[1], INTR_TYPE_CLK, pxa_hardclock, NULL, NULL, &ihl) != 0) { bus_release_resources(dev, pxa_timer_spec, sc->pt_res); device_printf(dev, "could not setup hardclock interrupt\n"); return (ENXIO); } return (0); } static int pxa_hardclock(void *arg) { struct trapframe *frame; frame = (struct trapframe *)arg; /* Clear the interrupt */ pxa_timer_clear_ossr(OST_SR_CH0); /* Schedule next tick */ pxa_timer_set_osmr(0, pxa_timer_get_oscr() + PXA_TIMER_TICK); /* Do what we came here for */ hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); return (FILTER_HANDLED); } static device_method_t pxa_timer_methods[] = { DEVMETHOD(device_probe, pxa_timer_probe), DEVMETHOD(device_attach, pxa_timer_attach), {0, 0} }; static driver_t pxa_timer_driver = { "timer", pxa_timer_methods, sizeof(struct pxa_timer_softc), }; static devclass_t pxa_timer_devclass; DRIVER_MODULE(pxatimer, pxa, pxa_timer_driver, pxa_timer_devclass, 0, 0); static unsigned pxa_timer_get_timecount(struct timecounter *tc) { return (pxa_timer_get_oscr()); } void cpu_initclocks(void) { pxa_timer_set_oscr(0); pxa_timer_set_osmr(0, PXA_TIMER_TICK); pxa_timer_interrupt_enable(0); tc_init(&pxa_timer_timecounter); } void cpu_reset(void) { uint32_t val; (void)disable_interrupts(PSR_I|PSR_F); val = pxa_timer_get_oscr(); val += PXA_TIMER_FREQUENCY; pxa_timer_set_osmr(3, val); pxa_timer_watchdog_enable(); for(;;); } void DELAY(int usec) { uint32_t val; if (timer_softc == NULL) { for (; usec > 0; usec--) for (val = 100; val > 0; val--) ; return; } + TSENTER(); val = pxa_timer_get_oscr(); val += (PXA_TIMER_FREQUENCY * usec) / 1000000; while (pxa_timer_get_oscr() <= val); + TSEXIT(); } uint32_t pxa_timer_get_osmr(int which) { return (bus_space_read_4(timer_softc->pt_bst, timer_softc->pt_bsh, which * 0x4)); } void pxa_timer_set_osmr(int which, uint32_t val) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, which * 0x4, val); } uint32_t pxa_timer_get_oscr(void) { return 
(bus_space_read_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_CR)); } void pxa_timer_set_oscr(uint32_t val) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_CR, val); } uint32_t pxa_timer_get_ossr(void) { return (bus_space_read_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_SR)); } void pxa_timer_clear_ossr(uint32_t val) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_SR, val); } void pxa_timer_watchdog_enable(void) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_WR, 0x1); } void pxa_timer_watchdog_disable(void) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_WR, 0x0); } void pxa_timer_interrupt_enable(int which) { uint32_t oier; if (which == -1) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR, 0xf); return; } oier = bus_space_read_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR); oier |= 1 << which; bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR, oier); } void pxa_timer_interrupt_disable(int which) { uint32_t oier; if (which == -1) { bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR, 0); } oier = bus_space_read_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR); oier &= ~(1 << which); bus_space_write_4(timer_softc->pt_bst, timer_softc->pt_bsh, OST_IR, oier); } Index: head/sys/kern/subr_prf.c =================================================================== --- head/sys/kern/subr_prf.c (revision 327431) +++ head/sys/kern/subr_prf.c (revision 327432) @@ -1,1260 +1,1262 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1988, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)subr_prf.c 8.3 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #ifdef _KERNEL #include "opt_ddb.h" #include "opt_printf.h" #endif /* _KERNEL */ #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #ifdef DDB #include #endif /* * Note that stdarg.h and the ANSI style va_start macro is used for both * ANSI and traditional C compilers. 
*/ #ifdef _KERNEL #include #else #include #endif /* * This is needed for sbuf_putbuf() when compiled into userland. Due to the * shared nature of this file, it's the only place to put it. */ #ifndef _KERNEL #include #endif #ifdef _KERNEL #define TOCONS 0x01 #define TOTTY 0x02 #define TOLOG 0x04 /* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */ #define MAXNBUF (sizeof(intmax_t) * NBBY + 1) struct putchar_arg { int flags; int pri; struct tty *tty; char *p_bufr; size_t n_bufr; char *p_next; size_t remain; }; struct snprintf_arg { char *str; size_t remain; }; extern int log_open; static void msglogchar(int c, int pri); static void msglogstr(char *str, int pri, int filter_cr); static void putchar(int ch, void *arg); static char *ksprintn(char *nbuf, uintmax_t num, int base, int *len, int upper); static void snprintf_func(int ch, void *arg); static int msgbufmapped; /* Set when safe to use msgbuf */ int msgbuftrigger; struct msgbuf *msgbufp; static int log_console_output = 1; SYSCTL_INT(_kern, OID_AUTO, log_console_output, CTLFLAG_RWTUN, &log_console_output, 0, "Duplicate console output to the syslog"); /* * See the comment in log_console() below for more explanation of this. */ static int log_console_add_linefeed; SYSCTL_INT(_kern, OID_AUTO, log_console_add_linefeed, CTLFLAG_RWTUN, &log_console_add_linefeed, 0, "log_console() adds extra newlines"); static int always_console_output; SYSCTL_INT(_kern, OID_AUTO, always_console_output, CTLFLAG_RWTUN, &always_console_output, 0, "Always output to console despite TIOCCONS"); /* * Warn that a system table is full. */ void tablefull(const char *tab) { log(LOG_ERR, "%s: table is full\n", tab); } /* * Uprintf prints to the controlling terminal for the current process. */ int uprintf(const char *fmt, ...) 
{ va_list ap; struct putchar_arg pca; struct proc *p; struct thread *td; int retval; td = curthread; if (TD_IS_IDLETHREAD(td)) return (0); sx_slock(&proctree_lock); p = td->td_proc; PROC_LOCK(p); if ((p->p_flag & P_CONTROLT) == 0) { PROC_UNLOCK(p); sx_sunlock(&proctree_lock); return (0); } SESS_LOCK(p->p_session); pca.tty = p->p_session->s_ttyp; SESS_UNLOCK(p->p_session); PROC_UNLOCK(p); if (pca.tty == NULL) { sx_sunlock(&proctree_lock); return (0); } pca.flags = TOTTY; pca.p_bufr = NULL; va_start(ap, fmt); tty_lock(pca.tty); sx_sunlock(&proctree_lock); retval = kvprintf(fmt, putchar, &pca, 10, ap); tty_unlock(pca.tty); va_end(ap); return (retval); } /* * tprintf and vtprintf print on the controlling terminal associated with the * given session, possibly to the log as well. */ void tprintf(struct proc *p, int pri, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vtprintf(p, pri, fmt, ap); va_end(ap); } void vtprintf(struct proc *p, int pri, const char *fmt, va_list ap) { struct tty *tp = NULL; int flags = 0; struct putchar_arg pca; struct session *sess = NULL; sx_slock(&proctree_lock); if (pri != -1) flags |= TOLOG; if (p != NULL) { PROC_LOCK(p); if (p->p_flag & P_CONTROLT && p->p_session->s_ttyvp) { sess = p->p_session; sess_hold(sess); PROC_UNLOCK(p); tp = sess->s_ttyp; if (tp != NULL && tty_checkoutq(tp)) flags |= TOTTY; else tp = NULL; } else PROC_UNLOCK(p); } pca.pri = pri; pca.tty = tp; pca.flags = flags; pca.p_bufr = NULL; if (pca.tty != NULL) tty_lock(pca.tty); sx_sunlock(&proctree_lock); kvprintf(fmt, putchar, &pca, 10, ap); if (pca.tty != NULL) tty_unlock(pca.tty); if (sess != NULL) sess_release(sess); msgbuftrigger = 1; } /* * Ttyprintf displays a message on a tty; it should be used only by * the tty driver, or anything that knows the underlying tty will not * be revoke(2)'d away. Other callers should use tprintf. */ int ttyprintf(struct tty *tp, const char *fmt, ...) 
{ va_list ap; struct putchar_arg pca; int retval; va_start(ap, fmt); pca.tty = tp; pca.flags = TOTTY; pca.p_bufr = NULL; retval = kvprintf(fmt, putchar, &pca, 10, ap); va_end(ap); return (retval); } static int _vprintf(int level, int flags, const char *fmt, va_list ap) { struct putchar_arg pca; int retval; #ifdef PRINTF_BUFR_SIZE char bufr[PRINTF_BUFR_SIZE]; #endif + TSENTER(); pca.tty = NULL; pca.pri = level; pca.flags = flags; #ifdef PRINTF_BUFR_SIZE pca.p_bufr = bufr; pca.p_next = pca.p_bufr; pca.n_bufr = sizeof(bufr); pca.remain = sizeof(bufr); *pca.p_next = '\0'; #else /* Don't buffer console output. */ pca.p_bufr = NULL; #endif retval = kvprintf(fmt, putchar, &pca, 10, ap); #ifdef PRINTF_BUFR_SIZE /* Write any buffered console/log output: */ if (*pca.p_bufr != '\0') { if (pca.flags & TOLOG) msglogstr(pca.p_bufr, level, /*filter_cr*/1); if (pca.flags & TOCONS) cnputs(pca.p_bufr); } #endif + TSEXIT(); return (retval); } /* * Log writes to the log buffer, and guarantees not to sleep (so can be * called by interrupt routines). If there is no process reading the * log yet, it writes to the console also. */ void log(int level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vlog(level, fmt, ap); va_end(ap); } void vlog(int level, const char *fmt, va_list ap) { (void)_vprintf(level, log_open ? 
TOLOG : TOCONS | TOLOG, fmt, ap); msgbuftrigger = 1; } #define CONSCHUNK 128 void log_console(struct uio *uio) { int c, error, nl; char *consbuffer; int pri; if (!log_console_output) return; pri = LOG_INFO | LOG_CONSOLE; uio = cloneuio(uio); consbuffer = malloc(CONSCHUNK, M_TEMP, M_WAITOK); nl = 0; while (uio->uio_resid > 0) { c = imin(uio->uio_resid, CONSCHUNK - 1); error = uiomove(consbuffer, c, uio); if (error != 0) break; /* Make sure we're NUL-terminated */ consbuffer[c] = '\0'; if (consbuffer[c - 1] == '\n') nl = 1; else nl = 0; msglogstr(consbuffer, pri, /*filter_cr*/ 1); } /* * The previous behavior in log_console() is preserved when * log_console_add_linefeed is non-zero. For that behavior, if an * individual console write came in that was not terminated with a * line feed, it would add a line feed. * * This results in different data in the message buffer than * appears on the system console (which doesn't add extra line feed * characters). * * A number of programs and rc scripts write a line feed, or a period * and a line feed when they have completed their operation. On * the console, this looks seamless, but when displayed with * 'dmesg -a', you wind up with output that looks like this: * * Updating motd: * . * * On the console, it looks like this: * Updating motd:. * * We could add logic to detect that situation, or just not insert * the extra newlines. Set the kern.log_console_add_linefeed * sysctl/tunable variable to get the old behavior. */ if (!nl && log_console_add_linefeed) { consbuffer[0] = '\n'; consbuffer[1] = '\0'; msglogstr(consbuffer, pri, /*filter_cr*/ 1); } msgbuftrigger = 1; free(uio, M_IOV); free(consbuffer, M_TEMP); } int printf(const char *fmt, ...) 
{ va_list ap; int retval; va_start(ap, fmt); retval = vprintf(fmt, ap); va_end(ap); return (retval); } int vprintf(const char *fmt, va_list ap) { int retval; retval = _vprintf(-1, TOCONS | TOLOG, fmt, ap); if (!panicstr) msgbuftrigger = 1; return (retval); } static void prf_putbuf(char *bufr, int flags, int pri) { if (flags & TOLOG) msglogstr(bufr, pri, /*filter_cr*/1); if (flags & TOCONS) { if ((panicstr == NULL) && (constty != NULL)) msgbuf_addstr(&consmsgbuf, -1, bufr, /*filter_cr*/ 0); if ((constty == NULL) ||(always_console_output)) cnputs(bufr); } } static void putbuf(int c, struct putchar_arg *ap) { /* Check if no console output buffer was provided. */ if (ap->p_bufr == NULL) { /* Output direct to the console. */ if (ap->flags & TOCONS) cnputc(c); if (ap->flags & TOLOG) msglogchar(c, ap->pri); } else { /* Buffer the character: */ *ap->p_next++ = c; ap->remain--; /* Always leave the buffer zero terminated. */ *ap->p_next = '\0'; /* Check if the buffer needs to be flushed. */ if (ap->remain == 2 || c == '\n') { prf_putbuf(ap->p_bufr, ap->flags, ap->pri); ap->p_next = ap->p_bufr; ap->remain = ap->n_bufr; *ap->p_next = '\0'; } /* * Since we fill the buffer up one character at a time, * this should not happen. We should always catch it when * ap->remain == 2 (if not sooner due to a newline), flush * the buffer and move on. One way this could happen is * if someone sets PRINTF_BUFR_SIZE to 1 or something * similarly silly. */ KASSERT(ap->remain > 2, ("Bad buffer logic, remain = %zd", ap->remain)); } } /* * Print a character on console or users terminal. If destination is * the console then the last bunch of characters are saved in msgbuf for * inspection later. */ static void putchar(int c, void *arg) { struct putchar_arg *ap = (struct putchar_arg*) arg; struct tty *tp = ap->tty; int flags = ap->flags; /* Don't use the tty code after a panic or while in ddb. 
*/ if (kdb_active) { if (c != '\0') cnputc(c); return; } if ((flags & TOTTY) && tp != NULL && panicstr == NULL) tty_putchar(tp, c); if ((flags & (TOCONS | TOLOG)) && c != '\0') putbuf(c, ap); } /* * Scaled down version of sprintf(3). */ int sprintf(char *buf, const char *cfmt, ...) { int retval; va_list ap; va_start(ap, cfmt); retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap); buf[retval] = '\0'; va_end(ap); return (retval); } /* * Scaled down version of vsprintf(3). */ int vsprintf(char *buf, const char *cfmt, va_list ap) { int retval; retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap); buf[retval] = '\0'; return (retval); } /* * Scaled down version of snprintf(3). */ int snprintf(char *str, size_t size, const char *format, ...) { int retval; va_list ap; va_start(ap, format); retval = vsnprintf(str, size, format, ap); va_end(ap); return(retval); } /* * Scaled down version of vsnprintf(3). */ int vsnprintf(char *str, size_t size, const char *format, va_list ap) { struct snprintf_arg info; int retval; info.str = str; info.remain = size; retval = kvprintf(format, snprintf_func, &info, 10, ap); if (info.remain >= 1) *info.str++ = '\0'; return (retval); } /* * Kernel version which takes radix argument vsnprintf(3). */ int vsnrprintf(char *str, size_t size, int radix, const char *format, va_list ap) { struct snprintf_arg info; int retval; info.str = str; info.remain = size; retval = kvprintf(format, snprintf_func, &info, radix, ap); if (info.remain >= 1) *info.str++ = '\0'; return (retval); } static void snprintf_func(int ch, void *arg) { struct snprintf_arg *const info = arg; if (info->remain >= 2) { *info->str++ = ch; info->remain--; } } /* * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse * order; return an optional length and a pointer to the last character * written in the buffer (i.e., the first character of the string). * The buffer pointed to by `nbuf' must have length >= MAXNBUF. 
*/ static char * ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper) { char *p, c; p = nbuf; *p = '\0'; do { c = hex2ascii(num % base); *++p = upper ? toupper(c) : c; } while (num /= base); if (lenp) *lenp = p - nbuf; return (p); } /* * Scaled down version of printf(3). * * Two additional formats: * * The format %b is supported to decode error registers. * Its usage is: * * printf("reg=%b\n", regval, "*"); * * where is the output base expressed as a control character, e.g. * \10 gives octal; \20 gives hex. Each arg is a sequence of characters, * the first of which gives the bit number to be inspected (origin 1), and * the next characters (up to a control character, i.e. a character <= 32), * give the name of the register. Thus: * * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE"); * * would produce output: * * reg=3 * * XXX: %D -- Hexdump, takes pointer and separator string: * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX * ("%*D", len, ptr, " " -> XX XX XX XX ... */ int kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap) { #define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; } char nbuf[MAXNBUF]; char *d; const char *p, *percent, *q; u_char *up; int ch, n; uintmax_t num; int base, lflag, qflag, tmp, width, ladjust, sharpflag, neg, sign, dot; int cflag, hflag, jflag, tflag, zflag; int bconv, dwidth, upper; char padc; int stop = 0, retval = 0; num = 0; if (!func) d = (char *) arg; else d = NULL; if (fmt == NULL) fmt = "(fmt null)\n"; if (radix < 2 || radix > 36) radix = 10; for (;;) { padc = ' '; width = 0; while ((ch = (u_char)*fmt++) != '%' || stop) { if (ch == '\0') return (retval); PCHAR(ch); } percent = fmt - 1; qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0; sign = 0; dot = 0; bconv = 0; dwidth = 0; upper = 0; cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0; reswitch: switch (ch = (u_char)*fmt++) { case '.': dot = 1; goto reswitch; case '#': sharpflag = 1; goto reswitch; 
case '+': sign = 1; goto reswitch; case '-': ladjust = 1; goto reswitch; case '%': PCHAR(ch); break; case '*': if (!dot) { width = va_arg(ap, int); if (width < 0) { ladjust = !ladjust; width = -width; } } else { dwidth = va_arg(ap, int); } goto reswitch; case '0': if (!dot) { padc = '0'; goto reswitch; } case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': for (n = 0;; ++fmt) { n = n * 10 + ch - '0'; ch = *fmt; if (ch < '0' || ch > '9') break; } if (dot) dwidth = n; else width = n; goto reswitch; case 'b': ladjust = 1; bconv = 1; goto handle_nosign; case 'c': width -= 1; if (!ladjust && width > 0) while (width--) PCHAR(padc); PCHAR(va_arg(ap, int)); if (ladjust && width > 0) while (width--) PCHAR(padc); break; case 'D': up = va_arg(ap, u_char *); p = va_arg(ap, char *); if (!width) width = 16; while(width--) { PCHAR(hex2ascii(*up >> 4)); PCHAR(hex2ascii(*up & 0x0f)); up++; if (width) for (q=p;*q;q++) PCHAR(*q); } break; case 'd': case 'i': base = 10; sign = 1; goto handle_sign; case 'h': if (hflag) { hflag = 0; cflag = 1; } else hflag = 1; goto reswitch; case 'j': jflag = 1; goto reswitch; case 'l': if (lflag) { lflag = 0; qflag = 1; } else lflag = 1; goto reswitch; case 'n': if (jflag) *(va_arg(ap, intmax_t *)) = retval; else if (qflag) *(va_arg(ap, quad_t *)) = retval; else if (lflag) *(va_arg(ap, long *)) = retval; else if (zflag) *(va_arg(ap, size_t *)) = retval; else if (hflag) *(va_arg(ap, short *)) = retval; else if (cflag) *(va_arg(ap, char *)) = retval; else *(va_arg(ap, int *)) = retval; break; case 'o': base = 8; goto handle_nosign; case 'p': base = 16; sharpflag = (width == 0); sign = 0; num = (uintptr_t)va_arg(ap, void *); goto number; case 'q': qflag = 1; goto reswitch; case 'r': base = radix; if (sign) goto handle_sign; goto handle_nosign; case 's': p = va_arg(ap, char *); if (p == NULL) p = "(null)"; if (!dot) n = strlen (p); else for (n = 0; n < dwidth && p[n]; n++) continue; width -= n; if (!ladjust && width > 
0) while (width--) PCHAR(padc); while (n--) PCHAR(*p++); if (ladjust && width > 0) while (width--) PCHAR(padc); break; case 't': tflag = 1; goto reswitch; case 'u': base = 10; goto handle_nosign; case 'X': upper = 1; case 'x': base = 16; goto handle_nosign; case 'y': base = 16; sign = 1; goto handle_sign; case 'z': zflag = 1; goto reswitch; handle_nosign: sign = 0; if (jflag) num = va_arg(ap, uintmax_t); else if (qflag) num = va_arg(ap, u_quad_t); else if (tflag) num = va_arg(ap, ptrdiff_t); else if (lflag) num = va_arg(ap, u_long); else if (zflag) num = va_arg(ap, size_t); else if (hflag) num = (u_short)va_arg(ap, int); else if (cflag) num = (u_char)va_arg(ap, int); else num = va_arg(ap, u_int); if (bconv) { q = va_arg(ap, char *); base = *q++; } goto number; handle_sign: if (jflag) num = va_arg(ap, intmax_t); else if (qflag) num = va_arg(ap, quad_t); else if (tflag) num = va_arg(ap, ptrdiff_t); else if (lflag) num = va_arg(ap, long); else if (zflag) num = va_arg(ap, ssize_t); else if (hflag) num = (short)va_arg(ap, int); else if (cflag) num = (char)va_arg(ap, int); else num = va_arg(ap, int); number: if (sign && (intmax_t)num < 0) { neg = 1; num = -(intmax_t)num; } p = ksprintn(nbuf, num, base, &n, upper); tmp = 0; if (sharpflag && num != 0) { if (base == 8) tmp++; else if (base == 16) tmp += 2; } if (neg) tmp++; if (!ladjust && padc == '0') dwidth = width - tmp; width -= tmp + imax(dwidth, n); dwidth -= n; if (!ladjust) while (width-- > 0) PCHAR(' '); if (neg) PCHAR('-'); if (sharpflag && num != 0) { if (base == 8) { PCHAR('0'); } else if (base == 16) { PCHAR('0'); PCHAR('x'); } } while (dwidth-- > 0) PCHAR('0'); while (*p) PCHAR(*p--); if (bconv && num != 0) { /* %b conversion flag format. */ tmp = retval; while (*q) { n = *q++; if (num & (1 << (n - 1))) { PCHAR(retval != tmp ? 
',' : '<'); for (; (n = *q) > ' '; ++q) PCHAR(n); } else for (; *q > ' '; ++q) continue; } if (retval != tmp) { PCHAR('>'); width -= retval - tmp; } } if (ladjust) while (width-- > 0) PCHAR(' '); break; default: while (percent < fmt) PCHAR(*percent++); /* * Since we ignore a formatting argument it is no * longer safe to obey the remaining formatting * arguments as the arguments will no longer match * the format specs. */ stop = 1; break; } } #undef PCHAR } /* * Put character in log buffer with a particular priority. */ static void msglogchar(int c, int pri) { static int lastpri = -1; static int dangling; char nbuf[MAXNBUF]; char *p; if (!msgbufmapped) return; if (c == '\0' || c == '\r') return; if (pri != -1 && pri != lastpri) { if (dangling) { msgbuf_addchar(msgbufp, '\n'); dangling = 0; } msgbuf_addchar(msgbufp, '<'); for (p = ksprintn(nbuf, (uintmax_t)pri, 10, NULL, 0); *p;) msgbuf_addchar(msgbufp, *p--); msgbuf_addchar(msgbufp, '>'); lastpri = pri; } msgbuf_addchar(msgbufp, c); if (c == '\n') { dangling = 0; lastpri = -1; } else { dangling = 1; } } static void msglogstr(char *str, int pri, int filter_cr) { if (!msgbufmapped) return; msgbuf_addstr(msgbufp, pri, str, filter_cr); } void msgbufinit(void *ptr, int size) { char *cp; static struct msgbuf *oldp = NULL; size -= sizeof(*msgbufp); cp = (char *)ptr; msgbufp = (struct msgbuf *)(cp + size); msgbuf_reinit(msgbufp, cp, size); if (msgbufmapped && oldp != msgbufp) msgbuf_copy(oldp, msgbufp); msgbufmapped = 1; oldp = msgbufp; } static int unprivileged_read_msgbuf = 1; SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_read_msgbuf, CTLFLAG_RW, &unprivileged_read_msgbuf, 0, "Unprivileged processes may read the kernel message buffer"); /* Sysctls for accessing/clearing the msgbuf */ static int sysctl_kern_msgbuf(SYSCTL_HANDLER_ARGS) { char buf[128]; u_int seq; int error, len; if (!unprivileged_read_msgbuf) { error = priv_check(req->td, PRIV_MSGBUF); if (error) return (error); } /* Read the whole buffer, one chunk at a 
time. */ mtx_lock(&msgbuf_lock); msgbuf_peekbytes(msgbufp, NULL, 0, &seq); for (;;) { len = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq); mtx_unlock(&msgbuf_lock); if (len == 0) return (SYSCTL_OUT(req, "", 1)); /* add nulterm */ error = sysctl_handle_opaque(oidp, buf, len, req); if (error) return (error); mtx_lock(&msgbuf_lock); } } SYSCTL_PROC(_kern, OID_AUTO, msgbuf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_msgbuf, "A", "Contents of kernel message buffer"); static int msgbuf_clearflag; static int sysctl_kern_msgbuf_clear(SYSCTL_HANDLER_ARGS) { int error; error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); if (!error && req->newptr) { mtx_lock(&msgbuf_lock); msgbuf_clear(msgbufp); mtx_unlock(&msgbuf_lock); msgbuf_clearflag = 0; } return (error); } SYSCTL_PROC(_kern, OID_AUTO, msgbuf_clear, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, &msgbuf_clearflag, 0, sysctl_kern_msgbuf_clear, "I", "Clear kernel message buffer"); #ifdef DDB DB_SHOW_COMMAND(msgbuf, db_show_msgbuf) { int i, j; if (!msgbufmapped) { db_printf("msgbuf not mapped yet\n"); return; } db_printf("msgbufp = %p\n", msgbufp); db_printf("magic = %x, size = %d, r= %u, w = %u, ptr = %p, cksum= %u\n", msgbufp->msg_magic, msgbufp->msg_size, msgbufp->msg_rseq, msgbufp->msg_wseq, msgbufp->msg_ptr, msgbufp->msg_cksum); for (i = 0; i < msgbufp->msg_size && !db_pager_quit; i++) { j = MSGBUF_SEQ_TO_POS(msgbufp, i + msgbufp->msg_rseq); db_printf("%c", msgbufp->msg_ptr[j]); } db_printf("\n"); } #endif /* DDB */ void hexdump(const void *ptr, int length, const char *hdr, int flags) { int i, j, k; int cols; const unsigned char *cp; char delim; if ((flags & HD_DELIM_MASK) != 0) delim = (flags & HD_DELIM_MASK) >> 8; else delim = ' '; if ((flags & HD_COLUMN_MASK) != 0) cols = flags & HD_COLUMN_MASK; else cols = 16; cp = ptr; for (i = 0; i < length; i+= cols) { if (hdr != NULL) printf("%s", hdr); if ((flags & HD_OMIT_COUNT) == 0) printf("%04x ", i); if ((flags & 
HD_OMIT_HEX) == 0) { for (j = 0; j < cols; j++) { k = i + j; if (k < length) printf("%c%02x", delim, cp[k]); else printf(" "); } } if ((flags & HD_OMIT_CHARS) == 0) { printf(" |"); for (j = 0; j < cols; j++) { k = i + j; if (k >= length) printf(" "); else if (cp[k] >= ' ' && cp[k] <= '~') printf("%c", cp[k]); else printf("."); } printf("|"); } printf("\n"); } } #endif /* _KERNEL */ void sbuf_hexdump(struct sbuf *sb, const void *ptr, int length, const char *hdr, int flags) { int i, j, k; int cols; const unsigned char *cp; char delim; if ((flags & HD_DELIM_MASK) != 0) delim = (flags & HD_DELIM_MASK) >> 8; else delim = ' '; if ((flags & HD_COLUMN_MASK) != 0) cols = flags & HD_COLUMN_MASK; else cols = 16; cp = ptr; for (i = 0; i < length; i+= cols) { if (hdr != NULL) sbuf_printf(sb, "%s", hdr); if ((flags & HD_OMIT_COUNT) == 0) sbuf_printf(sb, "%04x ", i); if ((flags & HD_OMIT_HEX) == 0) { for (j = 0; j < cols; j++) { k = i + j; if (k < length) sbuf_printf(sb, "%c%02x", delim, cp[k]); else sbuf_printf(sb, " "); } } if ((flags & HD_OMIT_CHARS) == 0) { sbuf_printf(sb, " |"); for (j = 0; j < cols; j++) { k = i + j; if (k >= length) sbuf_printf(sb, " "); else if (cp[k] >= ' ' && cp[k] <= '~') sbuf_printf(sb, "%c", cp[k]); else sbuf_printf(sb, "."); } sbuf_printf(sb, "|"); } sbuf_printf(sb, "\n"); } } #ifdef _KERNEL void counted_warning(unsigned *counter, const char *msg) { struct thread *td; unsigned c; for (;;) { c = *counter; if (c == 0) break; if (atomic_cmpset_int(counter, c, c - 1)) { td = curthread; log(LOG_INFO, "pid %d (%s) %s%s\n", td->td_proc->p_pid, td->td_name, msg, c > 1 ? 
"" : " - not logging anymore"); break; } } } #endif #ifdef _KERNEL void sbuf_putbuf(struct sbuf *sb) { prf_putbuf(sbuf_data(sb), TOLOG | TOCONS, -1); } #else void sbuf_putbuf(struct sbuf *sb) { printf("%s", sbuf_data(sb)); } #endif Index: head/sys/mips/ingenic/jz4780_timer.c =================================================================== --- head/sys/mips/ingenic/jz4780_timer.c (revision 327431) +++ head/sys/mips/ingenic/jz4780_timer.c (revision 327432) @@ -1,337 +1,339 @@ /*- * Copyright 2013-2015 Alexander Kabaev * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct jz4780_timer_softc { device_t dev; struct resource * res[4]; void * ih_cookie; struct eventtimer et; struct timecounter tc; }; static struct resource_spec jz4780_timer_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, /* OST */ { SYS_RES_IRQ, 1, RF_ACTIVE }, /* TC5 */ { SYS_RES_IRQ, 2, RF_ACTIVE }, /* TC0-4,6 */ { -1, 0 } }; /* * devclass_get_device / device_get_softc could be used * to dynamically locate this, however the timers are a * required device which can't be unloaded so there's * no need for the overhead. */ static struct jz4780_timer_softc *jz4780_timer_sc = NULL; #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) static unsigned jz4780_get_timecount(struct timecounter *tc) { struct jz4780_timer_softc *sc = (struct jz4780_timer_softc *)tc->tc_priv; return CSR_READ_4(sc, JZ_OST_CNT_LO); } static int jz4780_hardclock(void *arg) { struct jz4780_timer_softc *sc = (struct jz4780_timer_softc *)arg; CSR_WRITE_4(sc, JZ_TC_TFCR, TFR_FFLAG5); CSR_WRITE_4(sc, JZ_TC_TECR, TESR_TCST5); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int jz4780_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct jz4780_timer_softc *sc = (struct jz4780_timer_softc *)et->et_priv; uint32_t ticks; ticks = (first * et->et_frequency) / SBT_1S; if (ticks == 0) return (EINVAL); CSR_WRITE_4(sc, JZ_TC_TDFR(5), ticks); CSR_WRITE_4(sc, JZ_TC_TCNT(5), 0); CSR_WRITE_4(sc, JZ_TC_TESR, TESR_TCST5); return (0); } static int jz4780_timer_stop(struct eventtimer *et) { struct jz4780_timer_softc *sc = (struct jz4780_timer_softc *)et->et_priv; CSR_WRITE_4(sc, JZ_TC_TECR, TESR_TCST5); return (0); } static int 
jz4780_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ingenic,jz4780-tcu")) return (ENXIO); device_set_desc(dev, "Ingenic JZ4780 timer"); return (BUS_PROBE_DEFAULT); } static int jz4780_timer_attach(device_t dev) { struct jz4780_timer_softc *sc = device_get_softc(dev); pcell_t counter_freq; clk_t clk; /* There should be exactly one instance. */ if (jz4780_timer_sc != NULL) return (ENXIO); sc->dev = dev; if (bus_alloc_resources(dev, jz4780_timer_spec, sc->res)) { device_printf(dev, "can not allocate resources for device\n"); return (ENXIO); } counter_freq = 0; if (clk_get_by_name(dev, "ext", &clk) == 0) { uint64_t clk_freq; if (clk_get_freq(clk, &clk_freq) == 0) counter_freq = (uint32_t)clk_freq / 16; clk_release(clk); } if (counter_freq == 0) { device_printf(dev, "unable to determine ext clock frequency\n"); /* Hardcode value we 'know' is correct */ counter_freq = 48000000 / 16; } /* * Disable the timers, select the input for each timer, * clear and then start OST. 
*/ /* Stop OST, if it happens to be running */ CSR_WRITE_4(sc, JZ_TC_TECR, TESR_OST); /* Stop all other channels as well */ CSR_WRITE_4(sc, JZ_TC_TECR, TESR_TCST0 | TESR_TCST1 | TESR_TCST2 | TESR_TCST3 | TESR_TCST4 | TESR_TCST5 | TESR_TCST6 | TESR_TCST3); /* Clear detect mask flags */ CSR_WRITE_4(sc, JZ_TC_TFCR, 0xFFFFFFFF); /* Mask all interrupts */ CSR_WRITE_4(sc, JZ_TC_TMSR, 0xFFFFFFFF); /* Init counter with known data */ CSR_WRITE_4(sc, JZ_OST_CTRL, 0); CSR_WRITE_4(sc, JZ_OST_CNT_LO, 0); CSR_WRITE_4(sc, JZ_OST_CNT_HI, 0); CSR_WRITE_4(sc, JZ_OST_DATA, 0xffffffff); /* Configure counter for external clock */ CSR_WRITE_4(sc, JZ_OST_CTRL, OSTC_EXT_EN | OSTC_MODE | OSTC_DIV_16); /* Start the counter again */ CSR_WRITE_4(sc, JZ_TC_TESR, TESR_OST); /* Configure TCU channel 5 similarly to OST and leave it disabled */ CSR_WRITE_4(sc, JZ_TC_TCSR(5), TCSR_EXT_EN | TCSR_DIV_16); CSR_WRITE_4(sc, JZ_TC_TMCR, TMR_FMASK(5)); if (bus_setup_intr(dev, sc->res[2], INTR_TYPE_CLK, jz4780_hardclock, NULL, sc, &sc->ih_cookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, jz4780_timer_spec, sc->res); return (ENXIO); } sc->et.et_name = "JZ4780 TCU5"; sc->et.et_flags = ET_FLAGS_ONESHOT; sc->et.et_frequency = counter_freq; sc->et.et_quality = 1000; sc->et.et_min_period = (0x00000002LLU * SBT_1S) / sc->et.et_frequency; sc->et.et_max_period = (0x0000fffeLLU * SBT_1S) / sc->et.et_frequency; sc->et.et_start = jz4780_timer_start; sc->et.et_stop = jz4780_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); sc->tc.tc_get_timecount = jz4780_get_timecount; sc->tc.tc_name = "JZ4780 OST"; sc->tc.tc_frequency = counter_freq; sc->tc.tc_counter_mask = ~0u; sc->tc.tc_quality = 1000; sc->tc.tc_priv = sc; tc_init(&sc->tc); /* Now when tc is initialized, allow DELAY to find it */ jz4780_timer_sc = sc; return (0); } static int jz4780_timer_detach(device_t dev) { return (EBUSY); } static device_method_t jz4780_timer_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, jz4780_timer_probe), DEVMETHOD(device_attach, jz4780_timer_attach), DEVMETHOD(device_detach, jz4780_timer_detach), DEVMETHOD_END }; static driver_t jz4780_timer_driver = { "timer", jz4780_timer_methods, sizeof(struct jz4780_timer_softc), }; static devclass_t jz4780_timer_devclass; EARLY_DRIVER_MODULE(timer, simplebus, jz4780_timer_driver, jz4780_timer_devclass, 0, 0, BUS_PASS_TIMER); void DELAY(int usec) { uint32_t counter; uint32_t delta, now, previous, remaining; /* Timer has not yet been initialized */ if (jz4780_timer_sc == NULL) { for (; usec > 0; usec--) for (counter = 200; counter > 0; counter--) { /* Prevent gcc from optimizing out the loop */ mips_rd_cause(); } return; } + TSENTER(); /* * Some of the other timers in the source tree do this calculation as: * * usec * ((sc->tc.tc_frequency / 1000000) + 1) * * which gives a fairly pessimistic result when tc_frequency is an exact * multiple of 1000000. Given the data type and typical values for * tc_frequency adding 999999 shouldn't overflow. */ remaining = usec * ((jz4780_timer_sc->tc.tc_frequency + 999999) / 1000000); /* * We add one since the first iteration may catch the counter just * as it is changing. */ remaining += 1; previous = jz4780_get_timecount(&jz4780_timer_sc->tc); for ( ; ; ) { now = jz4780_get_timecount(&jz4780_timer_sc->tc); /* * If the timer has rolled over, then we have the case: * * if (previous > now) { * delta = (0 - previous) + now * } * * which is really no different then the normal case. * Both cases are simply: * * delta = now - previous. 
*/ delta = now - previous; if (delta >= remaining) break; previous = now; remaining -= delta; } + TSEXIT(); } void platform_initclocks(void) { } Index: head/sys/mips/mips/tick.c =================================================================== --- head/sys/mips/mips/tick.c (revision 327431) +++ head/sys/mips/mips/tick.c (revision 327432) @@ -1,400 +1,402 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006-2007 Bruce M. Simpson. * Copyright (c) 2003-2004 Juli Mallett. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Simple driver for the 32-bit interval counter built in to all * MIPS32 CPUs. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include #endif uint64_t counter_freq; struct timecounter *platform_timecounter; static DPCPU_DEFINE(uint32_t, cycles_per_tick); static uint32_t cycles_per_usec; static DPCPU_DEFINE(volatile uint32_t, counter_upper); static DPCPU_DEFINE(volatile uint32_t, counter_lower_last); static DPCPU_DEFINE(uint32_t, compare_ticks); static DPCPU_DEFINE(uint32_t, lost_ticks); struct clock_softc { int intr_rid; struct resource *intr_res; void *intr_handler; struct timecounter tc; struct eventtimer et; }; static struct clock_softc *softc; /* * Device methods */ static int clock_probe(device_t); static void clock_identify(driver_t *, device_t); static int clock_attach(device_t); static unsigned counter_get_timecount(struct timecounter *tc); void mips_timer_early_init(uint64_t clock_hz) { /* Initialize clock early so that we can use DELAY sooner */ counter_freq = clock_hz; cycles_per_usec = (clock_hz / (1000 * 1000)); } void platform_initclocks(void) { if (platform_timecounter != NULL) tc_init(platform_timecounter); } static uint64_t tick_ticker(void) { uint64_t ret; uint32_t ticktock; uint32_t t_lower_last, t_upper; /* * Disable preemption because we are working with cpu specific data. */ critical_enter(); /* * Note that even though preemption is disabled, interrupts are * still enabled. In particular there is a race with clock_intr() * reading the values of 'counter_upper' and 'counter_lower_last'. * * XXX this depends on clock_intr() being executed periodically * so that 'counter_upper' and 'counter_lower_last' are not stale. 
*/ do { t_upper = DPCPU_GET(counter_upper); t_lower_last = DPCPU_GET(counter_lower_last); } while (t_upper != DPCPU_GET(counter_upper)); ticktock = mips_rd_count(); critical_exit(); /* COUNT register wrapped around */ if (ticktock < t_lower_last) t_upper++; ret = ((uint64_t)t_upper << 32) | ticktock; return (ret); } void mips_timer_init_params(uint64_t platform_counter_freq, int double_count) { /* * XXX: Do not use printf here: uart code 8250 may use DELAY so this * function should be called before cninit. */ counter_freq = platform_counter_freq; /* * XXX: Some MIPS32 cores update the Count register only every two * pipeline cycles. * We know this because of status registers in CP0, make it automatic. */ if (double_count != 0) counter_freq /= 2; cycles_per_usec = counter_freq / (1 * 1000 * 1000); set_cputicker(tick_ticker, counter_freq, 1); } static int sysctl_machdep_counter_freq(SYSCTL_HANDLER_ARGS) { int error; uint64_t freq; if (softc == NULL) return (EOPNOTSUPP); freq = counter_freq; error = sysctl_handle_64(oidp, &freq, sizeof(freq), req); if (error == 0 && req->newptr != NULL) { counter_freq = freq; softc->et.et_frequency = counter_freq; softc->tc.tc_frequency = counter_freq; } return (error); } SYSCTL_PROC(_machdep, OID_AUTO, counter_freq, CTLTYPE_U64 | CTLFLAG_RW, NULL, 0, sysctl_machdep_counter_freq, "QU", "Timecounter frequency in Hz"); static unsigned counter_get_timecount(struct timecounter *tc) { return (mips_rd_count()); } /* * Wait for about n microseconds (at least!). */ void DELAY(int n) { uint32_t cur, last, delta, usecs; + TSENTER(); /* * This works by polling the timer and counting the number of * microseconds that go by. */ last = mips_rd_count(); delta = usecs = 0; while (n > usecs) { cur = mips_rd_count(); /* Check to see if the timer has wrapped around. 
*/ if (cur < last) delta += cur + (0xffffffff - last) + 1; else delta += cur - last; last = cur; if (delta >= cycles_per_usec) { usecs += delta / cycles_per_usec; delta %= cycles_per_usec; } } + TSEXIT(); } static int clock_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { uint32_t fdiv, div, next; if (period != 0) { div = (et->et_frequency * period) >> 32; } else div = 0; if (first != 0) fdiv = (et->et_frequency * first) >> 32; else fdiv = div; DPCPU_SET(cycles_per_tick, div); next = mips_rd_count() + fdiv; DPCPU_SET(compare_ticks, next); mips_wr_compare(next); return (0); } static int clock_stop(struct eventtimer *et) { DPCPU_SET(cycles_per_tick, 0); mips_wr_compare(0xffffffff); return (0); } /* * Device section of file below */ static int clock_intr(void *arg) { struct clock_softc *sc = (struct clock_softc *)arg; uint32_t cycles_per_tick; uint32_t count, compare_last, compare_next, lost_ticks; cycles_per_tick = DPCPU_GET(cycles_per_tick); /* * Set next clock edge. */ count = mips_rd_count(); compare_last = DPCPU_GET(compare_ticks); if (cycles_per_tick > 0) { compare_next = count + cycles_per_tick; DPCPU_SET(compare_ticks, compare_next); mips_wr_compare(compare_next); } else /* In one-shot mode timer should be stopped after the event. */ mips_wr_compare(0xffffffff); /* COUNT register wrapped around */ if (count < DPCPU_GET(counter_lower_last)) { DPCPU_SET(counter_upper, DPCPU_GET(counter_upper) + 1); } DPCPU_SET(counter_lower_last, count); if (cycles_per_tick > 0) { /* * Account for the "lost time" between when the timer interrupt * fired and when 'clock_intr' actually started executing. */ lost_ticks = DPCPU_GET(lost_ticks); lost_ticks += count - compare_last; /* * If the COUNT and COMPARE registers are no longer in sync * then make up some reasonable value for the 'lost_ticks'. * * This could happen, for e.g., after we resume normal * operations after exiting the debugger. 
*/ if (lost_ticks > 2 * cycles_per_tick) lost_ticks = cycles_per_tick; while (lost_ticks >= cycles_per_tick) { if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); lost_ticks -= cycles_per_tick; } DPCPU_SET(lost_ticks, lost_ticks); } if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int clock_probe(device_t dev) { device_set_desc(dev, "Generic MIPS32 ticker"); return (BUS_PROBE_NOWILDCARD); } static void clock_identify(driver_t * drv, device_t parent) { BUS_ADD_CHILD(parent, 0, "clock", 0); } static int clock_attach(device_t dev) { struct clock_softc *sc; #ifndef INTRNG int error; #endif if (device_get_unit(dev) != 0) panic("can't attach more clocks"); softc = sc = device_get_softc(dev); #ifdef INTRNG cpu_establish_hardintr("clock", clock_intr, NULL, sc, 5, INTR_TYPE_CLK, NULL); #else sc->intr_rid = 0; sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->intr_rid, 5, 5, 1, RF_ACTIVE); if (sc->intr_res == NULL) { device_printf(dev, "failed to allocate irq\n"); return (ENXIO); } error = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK, clock_intr, NULL, sc, &sc->intr_handler); if (error != 0) { device_printf(dev, "bus_setup_intr returned %d\n", error); return (error); } #endif sc->tc.tc_get_timecount = counter_get_timecount; sc->tc.tc_counter_mask = 0xffffffff; sc->tc.tc_frequency = counter_freq; sc->tc.tc_name = "MIPS32"; sc->tc.tc_quality = 800; sc->tc.tc_priv = sc; tc_init(&sc->tc); sc->et.et_name = "MIPS32"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 800; sc->et.et_frequency = counter_freq; sc->et.et_min_period = 0x00004000LLU; /* To be safe. 
*/ sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = clock_start; sc->et.et_stop = clock_stop; sc->et.et_priv = sc; et_register(&sc->et); return (0); } static device_method_t clock_methods[] = { /* Device interface */ DEVMETHOD(device_probe, clock_probe), DEVMETHOD(device_identify, clock_identify), DEVMETHOD(device_attach, clock_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), {0, 0} }; static driver_t clock_driver = { "clock", clock_methods, sizeof(struct clock_softc), }; static devclass_t clock_devclass; DRIVER_MODULE(clock, nexus, clock_driver, clock_devclass, 0, 0); Index: head/sys/mips/nlm/tick.c =================================================================== --- head/sys/mips/nlm/tick.c (revision 327431) +++ head/sys/mips/nlm/tick.c (revision 327432) @@ -1,385 +1,387 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights * reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * NETLOGIC_BSD */ /* * Simple driver for the 32-bit interval counter built in to all * MIPS32 CPUs. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include uint64_t counter_freq; struct timecounter *platform_timecounter; static DPCPU_DEFINE(uint32_t, cycles_per_tick); static uint32_t cycles_per_usec; static DPCPU_DEFINE(volatile uint32_t, counter_upper); static DPCPU_DEFINE(volatile uint32_t, counter_lower_last); static DPCPU_DEFINE(uint32_t, compare_ticks); static DPCPU_DEFINE(uint32_t, lost_ticks); struct clock_softc { int intr_rid; struct resource *intr_res; void *intr_handler; struct timecounter tc; struct eventtimer et; }; static struct clock_softc *softc; /* * Device methods */ static int clock_probe(device_t); static void clock_identify(driver_t *, device_t); static int clock_attach(device_t); static unsigned counter_get_timecount(struct timecounter *tc); void mips_timer_early_init(uint64_t clock_hz) { /* Initialize clock early so that we can use DELAY sooner */ counter_freq = clock_hz; cycles_per_usec = (clock_hz / (1000 * 1000)); } void platform_initclocks(void) { if (platform_timecounter != NULL) tc_init(platform_timecounter); } static uint64_t tick_ticker(void) { uint64_t ret; uint32_t ticktock; uint32_t t_lower_last, t_upper; /* * Disable preemption because we are working with cpu specific data. 
*/ critical_enter(); /* * Note that even though preemption is disabled, interrupts are * still enabled. In particular there is a race with clock_intr() * reading the values of 'counter_upper' and 'counter_lower_last'. * * XXX this depends on clock_intr() being executed periodically * so that 'counter_upper' and 'counter_lower_last' are not stale. */ do { t_upper = DPCPU_GET(counter_upper); t_lower_last = DPCPU_GET(counter_lower_last); } while (t_upper != DPCPU_GET(counter_upper)); ticktock = mips_rd_count(); critical_exit(); /* COUNT register wrapped around */ if (ticktock < t_lower_last) t_upper++; ret = ((uint64_t)t_upper << 32) | ticktock; return (ret); } void mips_timer_init_params(uint64_t platform_counter_freq, int double_count) { /* * XXX: Do not use printf here: uart code 8250 may use DELAY so this * function should be called before cninit. */ counter_freq = platform_counter_freq; /* * XXX: Some MIPS32 cores update the Count register only every two * pipeline cycles. * We know this because of status registers in CP0, make it automatic. */ if (double_count != 0) counter_freq /= 2; cycles_per_usec = counter_freq / (1 * 1000 * 1000); set_cputicker(tick_ticker, counter_freq, 1); } static int sysctl_machdep_counter_freq(SYSCTL_HANDLER_ARGS) { int error; uint64_t freq; if (softc == NULL) return (EOPNOTSUPP); freq = counter_freq; error = sysctl_handle_64(oidp, &freq, sizeof(freq), req); if (error == 0 && req->newptr != NULL) { counter_freq = freq; softc->et.et_frequency = counter_freq; softc->tc.tc_frequency = counter_freq; } return (error); } SYSCTL_PROC(_machdep, OID_AUTO, counter_freq, CTLTYPE_U64 | CTLFLAG_RW, NULL, 0, sysctl_machdep_counter_freq, "QU", "Timecounter frequency in Hz"); static unsigned counter_get_timecount(struct timecounter *tc) { return (mips_rd_count()); } /* * Wait for about n microseconds (at least!). 
*/
/*
 * NOTE(review): lines prefixed with '+' below are svn-diff additions from
 * r327432 (TSLOG boot-time profiling instrumentation), retained verbatim.
 */
void
DELAY(int n)
{
    uint32_t cur, last, delta, usecs;

+    TSENTER();
    /*
     * This works by polling the timer and counting the number of
     * microseconds that go by.
     */
    last = mips_rd_count();
    delta = usecs = 0;
    while (n > usecs) {
        cur = mips_rd_count();

        /* Check to see if the timer has wrapped around. */
        if (cur < last)
            delta += cur + (0xffffffff - last) + 1;
        else
            delta += cur - last;

        last = cur;

        if (delta >= cycles_per_usec) {
            usecs += delta / cycles_per_usec;
            delta %= cycles_per_usec;
        }
    }
+    TSEXIT();
}

/*
 * Event timer start method: convert 'first'/'period' (sbintime_t) into
 * COUNT-register ticks, record the periodic reload in the per-CPU
 * 'cycles_per_tick' (0 selects one-shot), and arm the COMPARE register
 * for the first event.
 */
static int
clock_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
    uint32_t fdiv, div, next;

    if (period != 0)
        div = (et->et_frequency * period) >> 32;
    else
        div = 0;
    if (first != 0)
        fdiv = (et->et_frequency * first) >> 32;
    else
        fdiv = div;
    DPCPU_SET(cycles_per_tick, div);
    next = mips_rd_count() + fdiv;
    DPCPU_SET(compare_ticks, next);
    mips_wr_compare(next);
    return (0);
}

/*
 * Event timer stop method: clear the periodic reload and park COMPARE at
 * the maximum value so no further interrupt fires soon.
 */
static int
clock_stop(struct eventtimer *et)
{

    DPCPU_SET(cycles_per_tick, 0);
    mips_wr_compare(0xffffffff);
    return (0);
}

/*
 * Device section of file below
 */
/*
 * Timer interrupt filter: re-arms COMPARE for the next tick, maintains the
 * per-CPU 64-bit COUNT extension ('counter_upper'/'counter_lower_last'),
 * and replays any ticks lost to interrupt latency before delivering the
 * event-timer callback.
 */
static int
clock_intr(void *arg)
{
    struct clock_softc *sc = (struct clock_softc *)arg;
    uint32_t cycles_per_tick;
    uint32_t count, compare_last, compare_next, lost_ticks;

    cycles_per_tick = DPCPU_GET(cycles_per_tick);
    /*
     * Set next clock edge.
     */
    count = mips_rd_count();
    compare_last = DPCPU_GET(compare_ticks);
    if (cycles_per_tick > 0) {
        compare_next = count + cycles_per_tick;
        DPCPU_SET(compare_ticks, compare_next);
        mips_wr_compare(compare_next);
    } else
        /* In one-shot mode timer should be stopped after the event. */
        mips_wr_compare(0xffffffff);

    /* COUNT register wrapped around */
    if (count < DPCPU_GET(counter_lower_last)) {
        DPCPU_SET(counter_upper, DPCPU_GET(counter_upper) + 1);
    }
    DPCPU_SET(counter_lower_last, count);

    if (cycles_per_tick > 0) {
        /*
         * Account for the "lost time" between when the timer interrupt
         * fired and when 'clock_intr' actually started executing.
         */
        lost_ticks = DPCPU_GET(lost_ticks);
        lost_ticks += count - compare_last;

        /*
         * If the COUNT and COMPARE registers are no longer in sync
         * then make up some reasonable value for the 'lost_ticks'.
         *
         * This could happen, for e.g., after we resume normal
         * operations after exiting the debugger.
         */
        if (lost_ticks > 2 * cycles_per_tick)
            lost_ticks = cycles_per_tick;

        while (lost_ticks >= cycles_per_tick) {
            if (sc->et.et_active)
                sc->et.et_event_cb(&sc->et, sc->et.et_arg);
            lost_ticks -= cycles_per_tick;
        }
        DPCPU_SET(lost_ticks, lost_ticks);
    }
    if (sc->et.et_active)
        sc->et.et_event_cb(&sc->et, sc->et.et_arg);
    return (FILTER_HANDLED);
}

static int
clock_probe(device_t dev)
{

    device_set_desc(dev, "Generic MIPS32 ticker");
    return (BUS_PROBE_NOWILDCARD);
}

/* Self-identify: add a single "clock" child under the parent bus. */
static void
clock_identify(driver_t * drv, device_t parent)
{

    BUS_ADD_CHILD(parent, 0, "clock", 0);
}

/*
 * Attach: hook clock_intr to the CPU COMPARE interrupt and register the
 * COUNT register as both a timecounter and a per-CPU event timer.  Only
 * unit 0 may attach (the COUNT/COMPARE pair is a per-CPU CP0 resource).
 */
static int
clock_attach(device_t dev)
{
    struct clock_softc *sc;

    if (device_get_unit(dev) != 0)
        panic("can't attach more clocks");

    softc = sc = device_get_softc(dev);
    cpu_establish_hardintr("compare", clock_intr, NULL, sc, IRQ_TIMER,
        INTR_TYPE_CLK, &sc->intr_handler);

    sc->tc.tc_get_timecount = counter_get_timecount;
    sc->tc.tc_counter_mask = 0xffffffff;
    sc->tc.tc_frequency = counter_freq;
    sc->tc.tc_name = "MIPS32";
    sc->tc.tc_quality = 800;
    sc->tc.tc_priv = sc;
    tc_init(&sc->tc);

    sc->et.et_name = "MIPS32";
    /* NOTE(review): one-shot mode deliberately disabled here (#if 0). */
#if 0
    sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
        ET_FLAGS_PERCPU;
#endif
    sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_PERCPU;
    sc->et.et_quality = 800;
    sc->et.et_frequency = counter_freq;
    sc->et.et_min_period = 0x00004000LLU; /* To be safe. */
    sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
    sc->et.et_start = clock_start;
    sc->et.et_stop = clock_stop;
    sc->et.et_priv = sc;
    et_register(&sc->et);

    return (0);
}

static device_method_t clock_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, clock_probe),
    DEVMETHOD(device_identify, clock_identify),
    DEVMETHOD(device_attach, clock_attach),
    DEVMETHOD(device_detach, bus_generic_detach),
    DEVMETHOD(device_shutdown, bus_generic_shutdown),

    {0, 0}
};

static driver_t clock_driver = {
    "clock",
    clock_methods,
    sizeof(struct clock_softc),
};

static devclass_t clock_devclass;

DRIVER_MODULE(clock, nexus, clock_driver, clock_devclass, 0, 0);
Index: head/sys/mips/rmi/tick.c
===================================================================
--- head/sys/mips/rmi/tick.c (revision 327431)
+++ head/sys/mips/rmi/tick.c (revision 327432)
@@ -1,379 +1,381 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006-2007 Bruce M. Simpson.
 * Copyright (c) 2003-2004 Juli Mallett.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Simple driver for the 32-bit interval counter built in to all * MIPS32 CPUs. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include uint64_t counter_freq; struct timecounter *platform_timecounter; static DPCPU_DEFINE(uint32_t, cycles_per_tick); static uint32_t cycles_per_usec; static DPCPU_DEFINE(volatile uint32_t, counter_upper); static DPCPU_DEFINE(volatile uint32_t, counter_lower_last); static DPCPU_DEFINE(uint32_t, compare_ticks); static DPCPU_DEFINE(uint32_t, lost_ticks); struct clock_softc { int intr_rid; struct resource *intr_res; void *intr_handler; struct timecounter tc; struct eventtimer et; }; static struct clock_softc *softc; /* * Device methods */ static int clock_probe(device_t); static void clock_identify(driver_t *, device_t); static int clock_attach(device_t); static unsigned counter_get_timecount(struct timecounter *tc); void mips_timer_early_init(uint64_t clock_hz) { /* Initialize clock early so that we can use DELAY sooner */ counter_freq = clock_hz; cycles_per_usec = (clock_hz / (1000 * 1000)); } void platform_initclocks(void) { if (platform_timecounter != NULL) tc_init(platform_timecounter); } static uint64_t tick_ticker(void) { uint64_t ret; uint32_t ticktock; uint32_t t_lower_last, t_upper; /* * Disable preemption because we are working with cpu specific data. 
*/ critical_enter(); /* * Note that even though preemption is disabled, interrupts are * still enabled. In particular there is a race with clock_intr() * reading the values of 'counter_upper' and 'counter_lower_last'. * * XXX this depends on clock_intr() being executed periodically * so that 'counter_upper' and 'counter_lower_last' are not stale. */ do { t_upper = DPCPU_GET(counter_upper); t_lower_last = DPCPU_GET(counter_lower_last); } while (t_upper != DPCPU_GET(counter_upper)); ticktock = mips_rd_count(); critical_exit(); /* COUNT register wrapped around */ if (ticktock < t_lower_last) t_upper++; ret = ((uint64_t)t_upper << 32) | ticktock; return (ret); } void mips_timer_init_params(uint64_t platform_counter_freq, int double_count) { /* * XXX: Do not use printf here: uart code 8250 may use DELAY so this * function should be called before cninit. */ counter_freq = platform_counter_freq; /* * XXX: Some MIPS32 cores update the Count register only every two * pipeline cycles. * We know this because of status registers in CP0, make it automatic. */ if (double_count != 0) counter_freq /= 2; cycles_per_usec = counter_freq / (1 * 1000 * 1000); set_cputicker(tick_ticker, counter_freq, 1); } static int sysctl_machdep_counter_freq(SYSCTL_HANDLER_ARGS) { int error; uint64_t freq; if (softc == NULL) return (EOPNOTSUPP); freq = counter_freq; error = sysctl_handle_64(oidp, &freq, sizeof(freq), req); if (error == 0 && req->newptr != NULL) { counter_freq = freq; softc->et.et_frequency = counter_freq; softc->tc.tc_frequency = counter_freq; } return (error); } SYSCTL_PROC(_machdep, OID_AUTO, counter_freq, CTLTYPE_U64 | CTLFLAG_RW, NULL, 0, sysctl_machdep_counter_freq, "QU", "Timecounter frequency in Hz"); static unsigned counter_get_timecount(struct timecounter *tc) { return (mips_rd_count()); } /* * Wait for about n microseconds (at least!). 
*/ void DELAY(int n) { uint32_t cur, last, delta, usecs; + TSENTER(); /* * This works by polling the timer and counting the number of * microseconds that go by. */ last = mips_rd_count(); delta = usecs = 0; while (n > usecs) { cur = mips_rd_count(); /* Check to see if the timer has wrapped around. */ if (cur < last) delta += cur + (0xffffffff - last) + 1; else delta += cur - last; last = cur; if (delta >= cycles_per_usec) { usecs += delta / cycles_per_usec; delta %= cycles_per_usec; } } + TSEXIT(); } static int clock_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { uint32_t fdiv, div, next; if (period != 0) div = (et->et_frequency * period) >> 32; else div = 0; if (first != 0) fdiv = (et->et_frequency * first) >> 32; else fdiv = div; DPCPU_SET(cycles_per_tick, div); next = mips_rd_count() + fdiv; DPCPU_SET(compare_ticks, next); mips_wr_compare(next); return (0); } static int clock_stop(struct eventtimer *et) { DPCPU_SET(cycles_per_tick, 0); mips_wr_compare(0xffffffff); return (0); } /* * Device section of file below */ static int clock_intr(void *arg) { struct clock_softc *sc = (struct clock_softc *)arg; uint32_t cycles_per_tick; uint32_t count, compare_last, compare_next, lost_ticks; cycles_per_tick = DPCPU_GET(cycles_per_tick); /* * Set next clock edge. */ count = mips_rd_count(); compare_last = DPCPU_GET(compare_ticks); if (cycles_per_tick > 0) { compare_next = count + cycles_per_tick; DPCPU_SET(compare_ticks, compare_next); mips_wr_compare(compare_next); } else /* In one-shot mode timer should be stopped after the event. */ mips_wr_compare(0xffffffff); /* COUNT register wrapped around */ if (count < DPCPU_GET(counter_lower_last)) { DPCPU_SET(counter_upper, DPCPU_GET(counter_upper) + 1); } DPCPU_SET(counter_lower_last, count); if (cycles_per_tick > 0) { /* * Account for the "lost time" between when the timer interrupt * fired and when 'clock_intr' actually started executing. 
*/ lost_ticks = DPCPU_GET(lost_ticks); lost_ticks += count - compare_last; /* * If the COUNT and COMPARE registers are no longer in sync * then make up some reasonable value for the 'lost_ticks'. * * This could happen, for e.g., after we resume normal * operations after exiting the debugger. */ if (lost_ticks > 2 * cycles_per_tick) lost_ticks = cycles_per_tick; while (lost_ticks >= cycles_per_tick) { if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); lost_ticks -= cycles_per_tick; } DPCPU_SET(lost_ticks, lost_ticks); } if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int clock_probe(device_t dev) { device_set_desc(dev, "Generic MIPS32 ticker"); return (BUS_PROBE_NOWILDCARD); } static void clock_identify(driver_t * drv, device_t parent) { BUS_ADD_CHILD(parent, 0, "clock", 0); } static int clock_attach(device_t dev) { struct clock_softc *sc; if (device_get_unit(dev) != 0) panic("can't attach more clocks"); softc = sc = device_get_softc(dev); cpu_establish_hardintr("compare", clock_intr, NULL, sc, IRQ_TIMER, INTR_TYPE_CLK, &sc->intr_handler); sc->tc.tc_get_timecount = counter_get_timecount; sc->tc.tc_counter_mask = 0xffffffff; sc->tc.tc_frequency = counter_freq; sc->tc.tc_name = "MIPS32"; sc->tc.tc_quality = 800; sc->tc.tc_priv = sc; tc_init(&sc->tc); sc->et.et_name = "MIPS32"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 800; sc->et.et_frequency = counter_freq; sc->et.et_min_period = 0x00004000LLU; /* To be safe. 
*/ sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = clock_start; sc->et.et_stop = clock_stop; sc->et.et_priv = sc; et_register(&sc->et); return (0); } static device_method_t clock_methods[] = { /* Device interface */ DEVMETHOD(device_probe, clock_probe), DEVMETHOD(device_identify, clock_identify), DEVMETHOD(device_attach, clock_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), {0, 0} }; static driver_t clock_driver = { "clock", clock_methods, sizeof(struct clock_softc), }; static devclass_t clock_devclass; DRIVER_MODULE(clock, nexus, clock_driver, clock_devclass, 0, 0); Index: head/sys/powerpc/powerpc/clock.c =================================================================== --- head/sys/powerpc/powerpc/clock.c (revision 327431) +++ head/sys/powerpc/powerpc/clock.c (revision 327432) @@ -1,313 +1,315 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: clock.c,v 1.9 2000/01/19 02:52:19 msaitoh Exp $ */ /* * Copyright (C) 2001 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Initially we assume a processor with a bus frequency of 12.5 MHz. */ static int initialized = 0; static u_long ns_per_tick = 80; static u_long ticks_per_sec = 12500000; static u_long *decr_counts[MAXCPU]; static int decr_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period); static int decr_et_stop(struct eventtimer *et); static timecounter_get_t decr_get_timecount; struct decr_state { int mode; /* 0 - off, 1 - periodic, 2 - one-shot. */ int32_t div; /* Periodic divisor. */ }; static DPCPU_DEFINE(struct decr_state, decr_state); static struct eventtimer decr_et; static struct timecounter decr_tc = { decr_get_timecount, /* get_timecount */ 0, /* no poll_pps */ ~0u, /* counter_mask */ 0, /* frequency */ "timebase" /* name */ }; /* * Decrementer interrupt handler. */ void decr_intr(struct trapframe *frame) { struct decr_state *s = DPCPU_PTR(decr_state); int nticks = 0; int32_t val; if (!initialized) return; (*decr_counts[curcpu])++; #ifdef BOOKE /* * Interrupt handler must reset DIS to avoid getting another * interrupt once EE is enabled. */ mtspr(SPR_TSR, TSR_DIS); #endif if (s->mode == 1) { /* * Based on the actual time delay since the last decrementer * reload, we arrange for earlier interrupt next time. 
*/ __asm ("mfdec %0" : "=r"(val)); while (val < 0) { val += s->div; nticks++; } mtdec(val); } else if (s->mode == 2) { nticks = 1; decr_et_stop(NULL); } else if (s->mode == 0) { /* Potemkin timer ran out without an event. Just reset it. */ decr_et_stop(NULL); } while (nticks-- > 0) { if (decr_et.et_active) decr_et.et_event_cb(&decr_et, decr_et.et_arg); } } void cpu_initclocks(void) { decr_tc_init(); cpu_initclocks_bsp(); } /* * BSP early initialization. */ void decr_init(void) { struct cpuref cpu; char buf[32]; /* * Check the BSP's timebase frequency. Sometimes we can't find the BSP, * so fall back to the first CPU in this case. */ if (platform_smp_get_bsp(&cpu) != 0) platform_smp_first_cpu(&cpu); ticks_per_sec = platform_timebase_freq(&cpu); ns_per_tick = 1000000000 / ticks_per_sec; set_cputicker(mftb, ticks_per_sec, 0); snprintf(buf, sizeof(buf), "cpu%d:decrementer", curcpu); intrcnt_add(buf, &decr_counts[curcpu]); decr_et_stop(NULL); initialized = 1; } #ifdef SMP /* * AP early initialization. */ void decr_ap_init(void) { char buf[32]; snprintf(buf, sizeof(buf), "cpu%d:decrementer", curcpu); intrcnt_add(buf, &decr_counts[curcpu]); decr_et_stop(NULL); } #endif /* * Final initialization. */ void decr_tc_init(void) { decr_tc.tc_frequency = ticks_per_sec; tc_init(&decr_tc); decr_et.et_name = "decrementer"; decr_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; decr_et.et_quality = 1000; decr_et.et_frequency = ticks_per_sec; decr_et.et_min_period = (0x00000002LLU << 32) / ticks_per_sec; decr_et.et_max_period = (0x7fffffffLLU << 32) / ticks_per_sec; decr_et.et_start = decr_et_start; decr_et.et_stop = decr_et_stop; decr_et.et_priv = NULL; et_register(&decr_et); } /* * Event timer start method. 
*/
/*
 * NOTE(review): lines prefixed with '+' below are svn-diff additions from
 * r327432 (TSLOG boot-time profiling instrumentation), retained verbatim.
 */
static int
decr_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period)
{
    struct decr_state *s = DPCPU_PTR(decr_state);
    uint32_t fdiv;
#ifdef BOOKE
    uint32_t tcr;
#endif

    /* mode 1 = periodic (reload 'div'), mode 2 = one-shot. */
    if (period != 0) {
        s->mode = 1;
        s->div = (decr_et.et_frequency * period) >> 32;
    } else {
        s->mode = 2;
        s->div = 0;
    }
    if (first != 0)
        fdiv = (decr_et.et_frequency * first) >> 32;
    else
        fdiv = s->div;

#ifdef BOOKE
    /*
     * Book-E: enable the decrementer interrupt (DIE) and, for periodic
     * mode, hardware auto-reload (ARE) from DECAR.
     */
    tcr = mfspr(SPR_TCR);
    tcr |= TCR_DIE;
    if (s->mode == 1) {
        mtspr(SPR_DECAR, s->div);
        tcr |= TCR_ARE;
    } else
        tcr &= ~TCR_ARE;
    mtdec(fdiv);
    mtspr(SPR_TCR, tcr);
#else
    mtdec(fdiv);
#endif

    return (0);
}

/*
 * Event timer stop method.
 */
static int
decr_et_stop(struct eventtimer *et)
{
    struct decr_state *s = DPCPU_PTR(decr_state);
#ifdef BOOKE
    uint32_t tcr;
#endif

    /* mode 0 = off; park the decrementer at its maximum positive value. */
    s->mode = 0;
    s->div = 0x7fffffff;
#ifdef BOOKE
    tcr = mfspr(SPR_TCR);
    tcr &= ~(TCR_DIE | TCR_ARE);
    mtspr(SPR_TCR, tcr);
#else
    mtdec(s->div);
#endif
    return (0);
}

/*
 * Timecounter get method.
 */
static unsigned
decr_get_timecount(struct timecounter *tc)
{
    return (mftb());
}

/*
 * Wait for about n microseconds (at least!).
 */
void
DELAY(int n)
{
    u_quad_t tb, ttb;

+    TSENTER();
    /* Busy-wait on the timebase until the target tick count is reached. */
    tb = mftb();
    ttb = tb + howmany(n * 1000, ns_per_tick);
    while (tb < ttb)
        tb = mftb();
+    TSEXIT();
}
Index: head/sys/riscv/riscv/timer.c
===================================================================
--- head/sys/riscv/riscv/timer.c (revision 327431)
+++ head/sys/riscv/riscv/timer.c (revision 327432)
@@ -1,286 +1,288 @@
/*-
 * Copyright (c) 2015-2017 Ruslan Bukin
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * RISC-V Timer */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DEFAULT_FREQ 10000000 #define TIMER_COUNTS 0x00 #define TIMER_MTIMECMP(cpu) (cpu * 8) struct riscv_timer_softc { void *ih; uint32_t clkfreq; struct eventtimer et; int intr_rid; struct resource *intr_res; }; static struct riscv_timer_softc *riscv_timer_sc = NULL; static timecounter_get_t riscv_timer_get_timecount; static struct timecounter riscv_timer_timecount = { .tc_name = "RISC-V Timecounter", .tc_get_timecount = riscv_timer_get_timecount, .tc_poll_pps = NULL, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 1000, }; static inline uint64_t get_cycles(void) { uint64_t cycles; __asm __volatile("rdtime %0" : "=r" (cycles)); return (cycles); } static long get_counts(struct riscv_timer_softc *sc) { uint64_t counts; counts = get_cycles(); return (counts); } static unsigned riscv_timer_get_timecount(struct timecounter *tc) { struct riscv_timer_softc *sc; sc = tc->tc_priv; return (get_counts(sc)); } static int riscv_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { uint64_t counts; if (first != 0) { counts = ((uint32_t)et->et_frequency * first) >> 32; sbi_set_timer(get_cycles() + counts); csr_set(sie, SIE_STIE); return (0); } return (EINVAL); } static int riscv_timer_stop(struct eventtimer *et) { /* TODO */ return (0); } static int riscv_timer_intr(void *arg) { struct riscv_timer_softc *sc; sc = (struct riscv_timer_softc *)arg; csr_clear(sip, SIP_STIP); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } static int riscv_timer_probe(device_t dev) { device_set_desc(dev, "RISC-V Timer"); return (BUS_PROBE_DEFAULT); } static int riscv_timer_attach(device_t dev) { struct riscv_timer_softc *sc; int error; sc = device_get_softc(dev); if (riscv_timer_sc) return 
(ENXIO); if (device_get_unit(dev) != 0) return ENXIO; sc->clkfreq = DEFAULT_FREQ; if (sc->clkfreq == 0) { device_printf(dev, "No clock frequency specified\n"); return (ENXIO); } riscv_timer_sc = sc; sc->intr_rid = 0; sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->intr_rid, IRQ_TIMER_SUPERVISOR, IRQ_TIMER_SUPERVISOR, 1, RF_ACTIVE); if (sc->intr_res == NULL) { device_printf(dev, "failed to allocate irq\n"); return (ENXIO); } /* Setup IRQs handler */ error = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK, riscv_timer_intr, NULL, sc, &sc->ih); if (error) { device_printf(dev, "Unable to alloc int resource.\n"); return (ENXIO); } riscv_timer_timecount.tc_frequency = sc->clkfreq; riscv_timer_timecount.tc_priv = sc; tc_init(&riscv_timer_timecount); sc->et.et_name = "RISC-V Eventtimer"; sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 1000; sc->et.et_frequency = sc->clkfreq; sc->et.et_min_period = (0x00000002LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = riscv_timer_start; sc->et.et_stop = riscv_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); return (0); } static device_method_t riscv_timer_methods[] = { DEVMETHOD(device_probe, riscv_timer_probe), DEVMETHOD(device_attach, riscv_timer_attach), { 0, 0 } }; static driver_t riscv_timer_driver = { "timer", riscv_timer_methods, sizeof(struct riscv_timer_softc), }; static devclass_t riscv_timer_devclass; EARLY_DRIVER_MODULE(timer, nexus, riscv_timer_driver, riscv_timer_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); void DELAY(int usec) { int64_t counts, counts_per_usec; uint64_t first, last; /* * Check the timers are setup, if not just * use a for loop for the meantime */ if (riscv_timer_sc == NULL) { for (; usec > 0; usec--) for (counts = 200; counts > 0; counts--) /* * Prevent the compiler from optimizing * out the loop */ cpufunc_nullop(); return; } + TSENTER(); /* Get the number of times to count */ 
counts_per_usec = ((riscv_timer_timecount.tc_frequency / 1000000) + 1); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (usec >= (0x80000000U / counts_per_usec)) counts = (0x80000000U / counts_per_usec) - 1; else counts = usec * counts_per_usec; first = get_counts(riscv_timer_sc); while (counts > 0) { last = get_counts(riscv_timer_sc); counts -= (int64_t)(last - first); first = last; } + TSEXIT(); } Index: head/sys/sparc64/sparc64/clock.c =================================================================== --- head/sys/sparc64/sparc64/clock.c (revision 327431) +++ head/sys/sparc64/sparc64/clock.c (revision 327432) @@ -1,61 +1,63 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include void DELAY(int usec) { u_long end; if (usec < 0) return; + TSENTER(); /* * We avoid being migrated to another CPU with a possibly * unsynchronized TICK timer while spinning. */ sched_pin(); end = rd(tick) + (u_long)usec * PCPU_GET(clock) / 1000000; while (rd(tick) < end) cpu_spinwait(); sched_unpin(); + TSEXIT(); } Index: head/sys/x86/x86/delay.c =================================================================== --- head/sys/x86/x86/delay.c (revision 327431) +++ head/sys/x86/x86/delay.c (revision 327432) @@ -1,108 +1,112 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 2010 Alexander Motin * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz and Don Ahn. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)clock.c 7.2 (Berkeley) 5/12/91 */ #include __FBSDID("$FreeBSD$"); /* Generic x86 routines to handle delay */ #include #include #include #include #include #include #include #include #include static u_int get_tsc(__unused struct timecounter *tc) { return (rdtsc32()); } static int delay_tc(int n) { struct timecounter *tc; timecounter_get_t *func; uint64_t end, freq, now; u_int last, mask, u; tc = timecounter; freq = atomic_load_acq_64(&tsc_freq); if (tsc_is_invariant && freq != 0) { func = get_tsc; mask = ~0u; } else { if (tc->tc_quality <= 0) return (0); func = tc->tc_get_timecount; mask = tc->tc_counter_mask; freq = tc->tc_frequency; } now = 0; end = freq * n / 1000000; if (func == get_tsc) sched_pin(); last = func(tc) & mask; do { cpu_spinwait(); u = func(tc) & mask; if (u < last) now += mask - last + u + 1; else now += u - last; last = u; } while (now < end); if (func == get_tsc) sched_unpin(); return (1); } void DELAY(int n) { - if (delay_tc(n)) + TSENTER(); + if (delay_tc(n)) { + TSEXIT(); return; + } 
init_ops.early_delay(n); + TSEXIT(); }