Index: head/sys/arm/allwinner/aw_gpio.c =================================================================== --- head/sys/arm/allwinner/aw_gpio.c (revision 368140) +++ head/sys/arm/allwinner/aw_gpio.c (revision 368141) @@ -1,1498 +1,1487 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 Ganbold Tsagaankhuu * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012 Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include "opt_soc.h" #endif -#ifdef INTRNG #include "pic_if.h" -#endif - #include "gpio_if.h" #define AW_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN); #define AW_GPIO_INTR_CAPS (GPIO_INTR_LEVEL_LOW | GPIO_INTR_LEVEL_HIGH | \ GPIO_INTR_EDGE_RISING | GPIO_INTR_EDGE_FALLING | GPIO_INTR_EDGE_BOTH) #define AW_GPIO_NONE 0 #define AW_GPIO_PULLUP 1 #define AW_GPIO_PULLDOWN 2 #define AW_GPIO_INPUT 0 #define AW_GPIO_OUTPUT 1 #define AW_GPIO_DRV_MASK 0x3 #define AW_GPIO_PUD_MASK 0x3 #define AW_PINCTRL 1 #define AW_R_PINCTRL 2 struct aw_gpio_conf { struct allwinner_padconf *padconf; const char *banks; }; /* Defined in aw_padconf.c */ #ifdef SOC_ALLWINNER_A10 extern struct allwinner_padconf a10_padconf; struct aw_gpio_conf a10_gpio_conf = { .padconf = &a10_padconf, .banks = "abcdefghi", }; #endif /* Defined in a13_padconf.c */ #ifdef SOC_ALLWINNER_A13 extern struct allwinner_padconf a13_padconf; struct aw_gpio_conf a13_gpio_conf = { .padconf = &a13_padconf, .banks = "bcdefg", }; #endif /* Defined in a20_padconf.c */ #ifdef SOC_ALLWINNER_A20 extern struct allwinner_padconf a20_padconf; struct aw_gpio_conf a20_gpio_conf = { .padconf = &a20_padconf, .banks = "abcdefghi", }; #endif /* Defined in a31_padconf.c */ #ifdef SOC_ALLWINNER_A31 extern struct allwinner_padconf a31_padconf; struct aw_gpio_conf a31_gpio_conf = { .padconf = &a31_padconf, .banks = "abcdefgh", }; #endif /* Defined in a31s_padconf.c */ #ifdef SOC_ALLWINNER_A31S extern struct allwinner_padconf a31s_padconf; struct aw_gpio_conf a31s_gpio_conf = { .padconf = &a31s_padconf, 
.banks = "abcdefgh", }; #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) extern struct allwinner_padconf a31_r_padconf; struct aw_gpio_conf a31_r_gpio_conf = { .padconf = &a31_r_padconf, .banks = "lm", }; #endif /* Defined in a33_padconf.c */ #ifdef SOC_ALLWINNER_A33 extern struct allwinner_padconf a33_padconf; struct aw_gpio_conf a33_gpio_conf = { .padconf = &a33_padconf, .banks = "bcdefgh", }; #endif /* Defined in h3_padconf.c */ #if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5) extern struct allwinner_padconf h3_padconf; extern struct allwinner_padconf h3_r_padconf; struct aw_gpio_conf h3_gpio_conf = { .padconf = &h3_padconf, .banks = "acdefg", }; struct aw_gpio_conf h3_r_gpio_conf = { .padconf = &h3_r_padconf, .banks = "l", }; #endif /* Defined in a83t_padconf.c */ #ifdef SOC_ALLWINNER_A83T extern struct allwinner_padconf a83t_padconf; extern struct allwinner_padconf a83t_r_padconf; struct aw_gpio_conf a83t_gpio_conf = { .padconf = &a83t_padconf, .banks = "bcdefgh" }; struct aw_gpio_conf a83t_r_gpio_conf = { .padconf = &a83t_r_padconf, .banks = "l", }; #endif /* Defined in a64_padconf.c */ #ifdef SOC_ALLWINNER_A64 extern struct allwinner_padconf a64_padconf; extern struct allwinner_padconf a64_r_padconf; struct aw_gpio_conf a64_gpio_conf = { .padconf = &a64_padconf, .banks = "bcdefgh", }; struct aw_gpio_conf a64_r_gpio_conf = { .padconf = &a64_r_padconf, .banks = "l", }; #endif /* Defined in h6_padconf.c */ #ifdef SOC_ALLWINNER_H6 extern struct allwinner_padconf h6_padconf; extern struct allwinner_padconf h6_r_padconf; struct aw_gpio_conf h6_gpio_conf = { .padconf = &h6_padconf, .banks = "cdfgh", }; struct aw_gpio_conf h6_r_gpio_conf = { .padconf = &h6_r_padconf, .banks = "lm", }; #endif static struct ofw_compat_data compat_data[] = { #ifdef SOC_ALLWINNER_A10 {"allwinner,sun4i-a10-pinctrl", (uintptr_t)&a10_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A13 {"allwinner,sun5i-a13-pinctrl", (uintptr_t)&a13_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A20 {"allwinner,sun7i-a20-pinctrl", (uintptr_t)&a20_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A31 {"allwinner,sun6i-a31-pinctrl", (uintptr_t)&a31_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A31S {"allwinner,sun6i-a31s-pinctrl", (uintptr_t)&a31s_gpio_conf}, #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) {"allwinner,sun6i-a31-r-pinctrl", (uintptr_t)&a31_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A33 {"allwinner,sun6i-a33-pinctrl", (uintptr_t)&a33_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A83T {"allwinner,sun8i-a83t-pinctrl", (uintptr_t)&a83t_gpio_conf}, {"allwinner,sun8i-a83t-r-pinctrl", (uintptr_t)&a83t_r_gpio_conf}, #endif #if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5) {"allwinner,sun8i-h3-pinctrl", (uintptr_t)&h3_gpio_conf}, {"allwinner,sun50i-h5-pinctrl", (uintptr_t)&h3_gpio_conf}, {"allwinner,sun8i-h3-r-pinctrl", (uintptr_t)&h3_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_A64 {"allwinner,sun50i-a64-pinctrl", (uintptr_t)&a64_gpio_conf}, {"allwinner,sun50i-a64-r-pinctrl", (uintptr_t)&a64_r_gpio_conf}, #endif #ifdef SOC_ALLWINNER_H6 {"allwinner,sun50i-h6-pinctrl", (uintptr_t)&h6_gpio_conf}, {"allwinner,sun50i-h6-r-pinctrl", (uintptr_t)&h6_r_gpio_conf}, #endif {NULL, 0} }; struct clk_list { TAILQ_ENTRY(clk_list) next; clk_t clk; }; -#ifdef INTRNG struct gpio_irqsrc { struct intr_irqsrc isrc; u_int irq; uint32_t mode; uint32_t pin; uint32_t bank; uint32_t intnum; uint32_t intfunc; uint32_t oldfunc; bool enabled; }; -#endif #define AW_GPIO_MEMRES 0 #define AW_GPIO_IRQRES 1 #define AW_GPIO_RESSZ 2 struct 
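(Aside: the compat_data table above drives SoC selection: probe matches the device tree "compatible" string against the table, and attach recovers the per-SoC aw_gpio_conf from the matching entry's ocd_data. A minimal, self-contained sketch of the same first-match lookup idiom, using hypothetical my_* names rather than the driver's real types:

	#include <string.h>

	struct my_conf { const char *banks; };

	static const struct my_conf my_h3_conf = { .banks = "acdefg" };

	static const struct my_ocd {
		const char *compat;
		const struct my_conf *conf;	/* plays the role of ocd_data */
	} my_table[] = {
		{ "allwinner,sun8i-h3-pinctrl",	&my_h3_conf },
		{ NULL,				NULL }
	};

	static const struct my_conf *
	my_lookup(const char *compat)
	{
		const struct my_ocd *e;

		/* First matching entry wins, as with ofw_bus_search_compatible(). */
		for (e = my_table; e->compat != NULL; e++)
			if (strcmp(e->compat, compat) == 0)
				return (e->conf);
		return (NULL);
	}
)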
aw_gpio_softc { device_t sc_dev; device_t sc_busdev; struct resource * sc_res[AW_GPIO_RESSZ]; struct mtx sc_mtx; struct resource * sc_mem_res; struct resource * sc_irq_res; void * sc_intrhand; struct aw_gpio_conf *conf; TAILQ_HEAD(, clk_list) clk_list; -#ifdef INTRNG struct gpio_irqsrc *gpio_pic_irqsrc; int nirqs; -#endif }; static struct resource_spec aw_gpio_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; #define AW_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx) #define AW_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx) #define AW_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define AW_GPIO_GP_CFG(_bank, _idx) 0x00 + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_DAT(_bank) 0x10 + ((_bank) * 0x24) #define AW_GPIO_GP_DRV(_bank, _idx) 0x14 + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_PUL(_bank, _idx) 0x1c + ((_bank) * 0x24) + ((_idx) << 2) #define AW_GPIO_GP_INT_BASE(_bank) (0x200 + 0x20 * _bank) #define AW_GPIO_GP_INT_CFG(_bank, _pin) (AW_GPIO_GP_INT_BASE(_bank) + (0x4 * ((_pin) / 8))) #define AW_GPIO_GP_INT_CTL(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x10) #define AW_GPIO_GP_INT_STA(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x14) #define AW_GPIO_GP_INT_DEB(_bank) (AW_GPIO_GP_INT_BASE(_bank) + 0x18) #define AW_GPIO_INT_EDGE_POSITIVE 0x0 #define AW_GPIO_INT_EDGE_NEGATIVE 0x1 #define AW_GPIO_INT_LEVEL_HIGH 0x2 #define AW_GPIO_INT_LEVEL_LOW 0x3 #define AW_GPIO_INT_EDGE_BOTH 0x4 static char *aw_gpio_parse_function(phandle_t node); static const char **aw_gpio_parse_pins(phandle_t node, int *pins_nb); static uint32_t aw_gpio_parse_bias(phandle_t node); static int aw_gpio_parse_drive_strength(phandle_t node, uint32_t *drive); static int aw_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *value); static int aw_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value); static int aw_gpio_pin_get_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int *value); static int aw_gpio_pin_set_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int value); static void aw_gpio_intr(void *arg); static void aw_gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc); static void aw_gpio_pic_disable_intr_locked(struct aw_gpio_softc *sc, struct intr_irqsrc *isrc); static void aw_gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc); static int aw_gpio_register_isrcs(struct aw_gpio_softc *sc); #define AW_GPIO_WRITE(_sc, _off, _val) \ bus_write_4((_sc)->sc_res[AW_GPIO_MEMRES], _off, _val) #define AW_GPIO_READ(_sc, _off) \ bus_read_4((_sc)->sc_res[AW_GPIO_MEMRES], _off) static uint32_t aw_gpio_get_function(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, func, offset; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (0); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); func = AW_GPIO_READ(sc, AW_GPIO_GP_CFG(bank, pin >> 3)); return ((func >> offset) & 0x7); } static int aw_gpio_set_function(struct aw_gpio_softc *sc, uint32_t pin, uint32_t f) { uint32_t bank, data, offset; /* Check if the function exists in the padconf data */ if (sc->conf->padconf->pins[pin].functions[f] == NULL) return (EINVAL); /* Must be called with lock held. 
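(Aside: to make the AW_GPIO_GP_CFG() arithmetic concrete: each bank occupies a 0x24-byte register window, and each 32-bit CFG word holds eight pins at four bits apiece (three of which carry the function select, hence the 0x7 mask). A small host-side sketch of the offset math, with a hypothetical bank/pin pair:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int bank = 2, pin = 13;	/* hypothetical: pin 13 of bank C */
		unsigned int reg = 0x00 + bank * 0x24 + ((pin >> 3) << 2);
		unsigned int shift = (pin & 0x07) << 2;

		/* Matches AW_GPIO_GP_CFG(bank, pin >> 3) and the shift used in
		   aw_gpio_get_function()/aw_gpio_set_function(). */
		printf("CFG register 0x%02x, function field at bits %u-%u\n",
		    reg, shift, shift + 2);
		return (0);
	}
)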
*/ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); data = AW_GPIO_READ(sc, AW_GPIO_GP_CFG(bank, pin >> 3)); data &= ~(7 << offset); data |= (f << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_CFG(bank, pin >> 3), data); return (0); } static uint32_t aw_gpio_get_pud(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_PUL(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_PUD_MASK); } static void aw_gpio_set_pud(struct aw_gpio_softc *sc, uint32_t pin, uint32_t state) { uint32_t bank, offset, val; if (aw_gpio_get_pud(sc, pin) == state) return; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_PUL(bank, pin >> 4)); val &= ~(AW_GPIO_PUD_MASK << offset); val |= (state << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_PUL(bank, pin >> 4), val); } static uint32_t aw_gpio_get_drv(struct aw_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_DRV(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_DRV_MASK); } static void aw_gpio_set_drv(struct aw_gpio_softc *sc, uint32_t pin, uint32_t drive) { uint32_t bank, offset, val; if (aw_gpio_get_drv(sc, pin) == drive) return; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = AW_GPIO_READ(sc, AW_GPIO_GP_DRV(bank, pin >> 4)); val &= ~(AW_GPIO_DRV_MASK << offset); val |= (drive << offset); AW_GPIO_WRITE(sc, AW_GPIO_GP_DRV(bank, pin >> 4), val); } static int aw_gpio_pin_configure(struct aw_gpio_softc *sc, uint32_t pin, uint32_t flags) { u_int val; int err = 0; /* Must be called with lock held. */ AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); /* Manage input/output. */ if (flags & GPIO_PIN_INPUT) { err = aw_gpio_set_function(sc, pin, AW_GPIO_INPUT); } else if ((flags & GPIO_PIN_OUTPUT) && aw_gpio_get_function(sc, pin) != AW_GPIO_OUTPUT) { if (flags & GPIO_PIN_PRESET_LOW) { aw_gpio_pin_set_locked(sc, pin, 0); } else if (flags & GPIO_PIN_PRESET_HIGH) { aw_gpio_pin_set_locked(sc, pin, 1); } else { /* Read the pin and preset output to current state. */ err = aw_gpio_set_function(sc, pin, AW_GPIO_INPUT); if (err == 0) { aw_gpio_pin_get_locked(sc, pin, &val); aw_gpio_pin_set_locked(sc, pin, val); } } if (err == 0) err = aw_gpio_set_function(sc, pin, AW_GPIO_OUTPUT); } if (err) return (err); /* Manage Pull-up/pull-down. 
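(Aside: the pull (PUL) and drive (DRV) registers pack sixteen pins at two bits each, so aw_gpio_set_pud() and aw_gpio_set_drv() are both read-modify-write updates of a 2-bit field. The generic shape of that update, as a sketch:

	#include <stdint.h>

	/* Sketch: update a 2-bit per-pin field in a PUL/DRV-style register value. */
	static uint32_t
	update_2bit_field(uint32_t regval, uint32_t pin, uint32_t state)
	{
		uint32_t shift = (pin & 0x0f) << 1;	/* 16 pins per register */

		regval &= ~(0x3u << shift);		/* clear the old state */
		regval |= state << shift;		/* set the new state */
		return (regval);
	}
)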
*/ if (flags & GPIO_PIN_PULLUP) aw_gpio_set_pud(sc, pin, AW_GPIO_PULLUP); else if (flags & GPIO_PIN_PULLDOWN) aw_gpio_set_pud(sc, pin, AW_GPIO_PULLDOWN); else aw_gpio_set_pud(sc, pin, AW_GPIO_NONE); return (0); } static device_t aw_gpio_get_bus(device_t dev) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int aw_gpio_pin_max(device_t dev, int *maxpin) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); *maxpin = sc->conf->padconf->npins - 1; return (0); } static int aw_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); *caps = AW_GPIO_DEFAULT_CAPS; if (sc->conf->padconf->pins[pin].eint_func != 0) *caps |= AW_GPIO_INTR_CAPS; return (0); } static int aw_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct aw_gpio_softc *sc; uint32_t func; uint32_t pud; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); AW_GPIO_LOCK(sc); func = aw_gpio_get_function(sc, pin); switch (func) { case AW_GPIO_INPUT: *flags = GPIO_PIN_INPUT; break; case AW_GPIO_OUTPUT: *flags = GPIO_PIN_OUTPUT; break; default: *flags = 0; break; } pud = aw_gpio_get_pud(sc, pin); switch (pud) { case AW_GPIO_PULLDOWN: *flags |= GPIO_PIN_PULLDOWN; break; case AW_GPIO_PULLUP: *flags |= GPIO_PIN_PULLUP; break; default: break; } AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->conf->padconf->npins) return (EINVAL); snprintf(name, GPIOMAXNAME - 1, "%s", sc->conf->padconf->pins[pin].name); name[GPIOMAXNAME - 1] = '\0'; return (0); } static int aw_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct aw_gpio_softc *sc; int err; sc = device_get_softc(dev); if (pin > sc->conf->padconf->npins) return (EINVAL); AW_GPIO_LOCK(sc); err = aw_gpio_pin_configure(sc, pin, flags); AW_GPIO_UNLOCK(sc); return (err); } static int aw_gpio_pin_set_locked(struct aw_gpio_softc *sc, uint32_t pin, unsigned int value) { uint32_t bank, data; AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if (value) data |= (1 << pin); else data &= ~(1 << pin); AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), data); return (0); } static int aw_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct aw_gpio_softc *sc; int ret; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); ret = aw_gpio_pin_set_locked(sc, pin, value); AW_GPIO_UNLOCK(sc); return (ret); } static int aw_gpio_pin_get_locked(struct aw_gpio_softc *sc,uint32_t pin, unsigned int *val) { uint32_t bank, reg_data; AW_GPIO_LOCK_ASSERT(sc); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; reg_data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); *val = (reg_data & (1 << pin)) ? 
1 : 0; return (0); } static char * aw_gpio_parse_function(phandle_t node) { char *function; if (OF_getprop_alloc(node, "function", (void **)&function) != -1) return (function); if (OF_getprop_alloc(node, "allwinner,function", (void **)&function) != -1) return (function); return (NULL); } static const char ** aw_gpio_parse_pins(phandle_t node, int *pins_nb) { const char **pinlist; *pins_nb = ofw_bus_string_list_to_array(node, "pins", &pinlist); if (*pins_nb > 0) return (pinlist); *pins_nb = ofw_bus_string_list_to_array(node, "allwinner,pins", &pinlist); if (*pins_nb > 0) return (pinlist); return (NULL); } static uint32_t aw_gpio_parse_bias(phandle_t node) { uint32_t bias; if (OF_getencprop(node, "pull", &bias, sizeof(bias)) != -1) return (bias); if (OF_getencprop(node, "allwinner,pull", &bias, sizeof(bias)) != -1) return (bias); if (OF_hasprop(node, "bias-disable")) return (AW_GPIO_NONE); if (OF_hasprop(node, "bias-pull-up")) return (AW_GPIO_PULLUP); if (OF_hasprop(node, "bias-pull-down")) return (AW_GPIO_PULLDOWN); return (AW_GPIO_NONE); } static int aw_gpio_parse_drive_strength(phandle_t node, uint32_t *drive) { uint32_t drive_str; if (OF_getencprop(node, "drive", drive, sizeof(*drive)) != -1) return (0); if (OF_getencprop(node, "allwinner,drive", drive, sizeof(*drive)) != -1) return (0); if (OF_getencprop(node, "drive-strength", &drive_str, sizeof(drive_str)) != -1) { *drive = (drive_str / 10) - 1; return (0); } return (1); } static int aw_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct aw_gpio_softc *sc; int ret; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); ret = aw_gpio_pin_get_locked(sc, pin, val); AW_GPIO_UNLOCK(sc); return (ret); } static int aw_gpio_pin_toggle(device_t dev, uint32_t pin) { struct aw_gpio_softc *sc; uint32_t bank, data; sc = device_get_softc(dev); if (pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[pin].port; pin = sc->conf->padconf->pins[pin].pin; AW_GPIO_LOCK(sc); data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if (data & (1 << pin)) data &= ~(1 << pin); else data |= (1 << pin); AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), data); AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pin_access_32(device_t dev, uint32_t first_pin, uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins) { struct aw_gpio_softc *sc; uint32_t bank, data, pin; sc = device_get_softc(dev); if (first_pin > sc->conf->padconf->npins) return (EINVAL); /* * We require that first_pin refers to the first pin in a bank, because * this API is not about convenience, it's for making a set of pins * change simultaneously (required) with reasonably high performance * (desired); we need to do a read-modify-write on a single register. 
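(Aside: the semantics that comment describes: gpio_pin_access_32 does a single read of the bank's data register, forces clear_pins low, toggles change_pins, and writes the result back, so every affected pin changes on the same bus write. A host-side illustration of the bit math:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int data = 0xf0f0;		/* hypothetical DAT value */
		unsigned int clear_pins = 0x00f0, change_pins = 0x000f;
		unsigned int newval = (data & ~clear_pins) ^ change_pins;

		printf("0x%04x -> 0x%04x\n", data, newval);	/* 0xf0f0 -> 0xf00f */
		return (0);
	}
)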
*/ bank = sc->conf->padconf->pins[first_pin].port; pin = sc->conf->padconf->pins[first_pin].pin; if (pin != 0) return (EINVAL); AW_GPIO_LOCK(sc); data = AW_GPIO_READ(sc, AW_GPIO_GP_DAT(bank)); if ((clear_pins | change_pins) != 0) AW_GPIO_WRITE(sc, AW_GPIO_GP_DAT(bank), (data & ~clear_pins) ^ change_pins); AW_GPIO_UNLOCK(sc); if (orig_pins != NULL) *orig_pins = data; return (0); } static int aw_gpio_pin_config_32(device_t dev, uint32_t first_pin, uint32_t num_pins, uint32_t *pin_flags) { struct aw_gpio_softc *sc; uint32_t bank, pin; int err; sc = device_get_softc(dev); if (first_pin > sc->conf->padconf->npins) return (EINVAL); bank = sc->conf->padconf->pins[first_pin].port; if (sc->conf->padconf->pins[first_pin].pin != 0) return (EINVAL); /* * The configuration for a bank of pins is scattered among several * registers; we cannot guarantee to simultaneously change the state of all * the pins in the flags array. So just loop through the array * configuring each pin for now. If there was a strong need, it might * be possible to support some limited simultaneous config, such as * adjacent groups of 8 pins that line up the same as the config regs. */ for (err = 0, pin = first_pin; err == 0 && pin < num_pins; ++pin) { if (pin_flags[pin] & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) err = aw_gpio_pin_configure(sc, pin, pin_flags[pin]); } return (err); } static int aw_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { struct aw_gpio_softc *sc; int i; sc = device_get_softc(bus); /* The GPIO pins are mapped as: <gpio-phandle bank pin flags>. */ for (i = 0; i < sc->conf->padconf->npins; i++) if (sc->conf->padconf->pins[i].port == gpios[0] && sc->conf->padconf->pins[i].pin == gpios[1]) { *pin = i; break; } *flags = gpios[gcells - 1]; return (0); } static int aw_find_pinnum_by_name(struct aw_gpio_softc *sc, const char *pinname) { int i; for (i = 0; i < sc->conf->padconf->npins; i++) if (!strcmp(pinname, sc->conf->padconf->pins[i].name)) return i; return (-1); } static int aw_find_pin_func(struct aw_gpio_softc *sc, int pin, const char *func) { int i; for (i = 0; i < AW_MAX_FUNC_BY_PIN; i++) if (sc->conf->padconf->pins[pin].functions[i] && !strcmp(func, sc->conf->padconf->pins[pin].functions[i])) return (i); return (-1); } static int aw_fdt_configure_pins(device_t dev, phandle_t cfgxref) { struct aw_gpio_softc *sc; phandle_t node; const char **pinlist = NULL; char *pin_function = NULL; uint32_t pin_drive, pin_pull; int pins_nb, pin_num, pin_func, i, ret; bool set_drive; sc = device_get_softc(dev); node = OF_node_from_xref(cfgxref); ret = 0; set_drive = false; /* Get all the properties needed to configure the pins */ pinlist = aw_gpio_parse_pins(node, &pins_nb); if (pinlist == NULL) return (ENOENT); pin_function = aw_gpio_parse_function(node); if (pin_function == NULL) { ret = ENOENT; goto out; } if (aw_gpio_parse_drive_strength(node, &pin_drive) == 0) set_drive = true; pin_pull = aw_gpio_parse_bias(node); /* Configure each pin to the correct function, drive and pull */ for (i = 0; i < pins_nb; i++) { pin_num = aw_find_pinnum_by_name(sc, pinlist[i]); if (pin_num == -1) { ret = ENOENT; goto out; } pin_func = aw_find_pin_func(sc, pin_num, pin_function); if (pin_func == -1) { ret = ENOENT; goto out; } AW_GPIO_LOCK(sc); if (aw_gpio_get_function(sc, pin_num) != pin_func) aw_gpio_set_function(sc, pin_num, pin_func); if (set_drive) aw_gpio_set_drv(sc, pin_num, pin_drive); if (pin_pull != AW_GPIO_NONE) aw_gpio_set_pud(sc, pin_num, pin_pull); AW_GPIO_UNLOCK(sc); } out: OF_prop_free(pinlist);
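(Aside: one detail worth noting in aw_gpio_parse_drive_strength() above: the generic "drive-strength" property is expressed in milliamps, while the 2-bit DRV register field is the 0-based step, hence the (drive_str / 10) - 1 conversion; with a 0x3 mask that covers 10 through 40 mA. A quick check:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int ma;

		for (ma = 10; ma <= 40; ma += 10)
			printf("drive-strength %2u mA -> DRV field %u\n",
			    ma, ma / 10 - 1);
		return (0);
	}
)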
OF_prop_free(pin_function); return (ret); } static void aw_gpio_enable_bank_supply(void *arg) { struct aw_gpio_softc *sc = arg; regulator_t vcc_supply; char bank_reg_name[16]; int i, nbanks; nbanks = strlen(sc->conf->banks); for (i = 0; i < nbanks; i++) { snprintf(bank_reg_name, sizeof(bank_reg_name), "vcc-p%c-supply", sc->conf->banks[i]); if (regulator_get_by_ofw_property(sc->sc_dev, 0, bank_reg_name, &vcc_supply) == 0) { if (bootverbose) device_printf(sc->sc_dev, "Enabling regulator for gpio bank %c\n", sc->conf->banks[i]); if (regulator_enable(vcc_supply) != 0) { device_printf(sc->sc_dev, "Cannot enable regulator for bank %c\n", sc->conf->banks[i]); } } } } static int aw_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner GPIO/Pinmux controller"); return (BUS_PROBE_DEFAULT); } static int aw_gpio_attach(device_t dev) { int error; phandle_t gpio; struct aw_gpio_softc *sc; struct clk_list *clkp, *clkp_tmp; clk_t clk; hwreset_t rst = NULL; int off, err, clkret; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "aw gpio", "gpio", MTX_SPIN); if (bus_alloc_resources(dev, aw_gpio_res_spec, sc->sc_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_res[AW_GPIO_IRQRES], INTR_TYPE_CLK | INTR_MPSAFE, NULL, aw_gpio_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); goto fail; } /* Find our node. */ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Use the right pin data for the current SoC */ sc->conf = (struct aw_gpio_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } TAILQ_INIT(&sc->clk_list); for (off = 0, clkret = 0; clkret == 0; off++) { clkret = clk_get_by_ofw_index(dev, 0, off, &clk); if (clkret != 0) break; err = clk_enable(clk); if (err != 0) { device_printf(dev, "Could not enable clock %s\n", clk_get_name(clk)); goto fail; } clkp = malloc(sizeof(*clkp), M_DEVBUF, M_WAITOK | M_ZERO); clkp->clk = clk; TAILQ_INSERT_TAIL(&sc->clk_list, clkp, next); } if (clkret != 0 && clkret != ENOENT) { device_printf(dev, "Could not find clock at offset %d (%d)\n", off, clkret); goto fail; } -#ifdef INTRNG aw_gpio_register_isrcs(sc); intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev))); -#endif sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; /* * Register as a pinctrl device */ fdt_pinctrl_register(dev, "pins"); fdt_pinctrl_configure_tree(dev); fdt_pinctrl_register(dev, "allwinner,pins"); fdt_pinctrl_configure_tree(dev); config_intrhook_oneshot(aw_gpio_enable_bank_supply, sc); return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); /* Disable clock */ TAILQ_FOREACH_SAFE(clkp, &sc->clk_list, next, clkp_tmp) { err = clk_disable(clkp->clk); if (err != 0) device_printf(dev, "Could not disable clock %s\n", clk_get_name(clkp->clk)); err = clk_release(clkp->clk); if (err != 0) device_printf(dev, "Could not release clock %s\n", clk_get_name(clkp->clk)); TAILQ_REMOVE(&sc->clk_list, clkp, next); free(clkp, M_DEVBUF); } /* Assert 
resets */ if (rst) { hwreset_assert(rst); hwreset_release(rst); } return (ENXIO); } static int aw_gpio_detach(device_t dev) { return (EBUSY); } static void aw_gpio_intr(void *arg) { struct aw_gpio_softc *sc; struct intr_irqsrc *isrc; uint32_t reg; int irq; sc = (struct aw_gpio_softc *)arg; AW_GPIO_LOCK(sc); for (irq = 0; irq < sc->nirqs; irq++) { if (!sc->gpio_pic_irqsrc[irq].enabled) continue; reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_STA(sc->gpio_pic_irqsrc[irq].bank)); if (!(reg & (1 << sc->gpio_pic_irqsrc[irq].intnum))) continue; isrc = &sc->gpio_pic_irqsrc[irq].isrc; if (intr_isrc_dispatch(isrc, curthread->td_intr_frame) != 0) { aw_gpio_pic_disable_intr_locked(sc, isrc); aw_gpio_pic_post_filter(sc->sc_dev, isrc); device_printf(sc->sc_dev, "Stray irq %u disabled\n", irq); } } AW_GPIO_UNLOCK(sc); } /* * Interrupt support */ static int aw_gpio_register_isrcs(struct aw_gpio_softc *sc) { const char *name; int nirqs; int pin; int err; name = device_get_nameunit(sc->sc_dev); for (nirqs = 0, pin = 0; pin < sc->conf->padconf->npins; pin++) { if (sc->conf->padconf->pins[pin].eint_func == 0) continue; nirqs++; } sc->gpio_pic_irqsrc = malloc(sizeof(*sc->gpio_pic_irqsrc) * nirqs, M_DEVBUF, M_WAITOK | M_ZERO); for (nirqs = 0, pin = 0; pin < sc->conf->padconf->npins; pin++) { if (sc->conf->padconf->pins[pin].eint_func == 0) continue; sc->gpio_pic_irqsrc[nirqs].pin = pin; sc->gpio_pic_irqsrc[nirqs].bank = sc->conf->padconf->pins[pin].eint_bank; sc->gpio_pic_irqsrc[nirqs].intnum = sc->conf->padconf->pins[pin].eint_num; sc->gpio_pic_irqsrc[nirqs].intfunc = sc->conf->padconf->pins[pin].eint_func; sc->gpio_pic_irqsrc[nirqs].irq = nirqs; sc->gpio_pic_irqsrc[nirqs].mode = GPIO_INTR_CONFORM; err = intr_isrc_register(&sc->gpio_pic_irqsrc[nirqs].isrc, sc->sc_dev, 0, "%s,%s", name, sc->conf->padconf->pins[pin].functions[sc->conf->padconf->pins[pin].eint_func]); if (err) { device_printf(sc->sc_dev, "intr_isrc_register failed for irq %d\n", nirqs); } nirqs++; } sc->nirqs = nirqs; return (0); } static void aw_gpio_pic_disable_intr_locked(struct aw_gpio_softc *sc, struct intr_irqsrc *isrc) { u_int irq; uint32_t reg; AW_GPIO_LOCK_ASSERT(sc); irq = ((struct gpio_irqsrc *)isrc)->irq; reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank)); reg &= ~(1 << sc->gpio_pic_irqsrc[irq].intnum); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank), reg); sc->gpio_pic_irqsrc[irq].enabled = false; } static void aw_gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); AW_GPIO_LOCK(sc); aw_gpio_pic_disable_intr_locked(sc, isrc); AW_GPIO_UNLOCK(sc); } static void aw_gpio_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; u_int irq; uint32_t reg; sc = device_get_softc(dev); irq = ((struct gpio_irqsrc *)isrc)->irq; AW_GPIO_LOCK(sc); reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank)); reg |= 1 << sc->gpio_pic_irqsrc[irq].intnum; AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CTL(sc->gpio_pic_irqsrc[irq].bank), reg); AW_GPIO_UNLOCK(sc); sc->gpio_pic_irqsrc[irq].enabled = true; } static int aw_gpio_pic_map_gpio(struct aw_gpio_softc *sc, struct intr_map_data_gpio *dag, u_int *irqp, u_int *mode) { u_int irq; int pin; irq = dag->gpio_pin_num; for (pin = 0; pin < sc->nirqs; pin++) if (sc->gpio_pic_irqsrc[pin].pin == irq) break; if (pin == sc->nirqs) { device_printf(sc->sc_dev, "Invalid interrupt number %u\n", irq); return (EINVAL); } switch (dag->gpio_intr_mode) { case GPIO_INTR_LEVEL_LOW: case
GPIO_INTR_LEVEL_HIGH: case GPIO_INTR_EDGE_RISING: case GPIO_INTR_EDGE_FALLING: case GPIO_INTR_EDGE_BOTH: break; default: device_printf(sc->sc_dev, "Unsupported interrupt mode 0x%8x\n", dag->gpio_intr_mode); return (EINVAL); } *irqp = pin; if (mode != NULL) *mode = dag->gpio_intr_mode; return (0); } static int aw_gpio_pic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct aw_gpio_softc *sc; u_int irq; int err; sc = device_get_softc(dev); switch (data->type) { case INTR_MAP_DATA_GPIO: err = aw_gpio_pic_map_gpio(sc, (struct intr_map_data_gpio *)data, &irq, NULL); break; default: return (ENOTSUP); } if (err == 0) *isrcp = &sc->gpio_pic_irqsrc[irq].isrc; return (0); } static int aw_gpio_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; uint32_t irqcfg; uint32_t pinidx, reg; u_int irq, mode; int err; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; switch (data->type) { case INTR_MAP_DATA_GPIO: err = aw_gpio_pic_map_gpio(sc, (struct intr_map_data_gpio *)data, &irq, &mode); break; default: return (ENOTSUP); } pinidx = (sc->gpio_pic_irqsrc[irq].intnum % 8) * 4; AW_GPIO_LOCK(sc); switch (mode) { case GPIO_INTR_LEVEL_LOW: irqcfg = AW_GPIO_INT_LEVEL_LOW << pinidx; break; case GPIO_INTR_LEVEL_HIGH: irqcfg = AW_GPIO_INT_LEVEL_HIGH << pinidx; break; case GPIO_INTR_EDGE_RISING: irqcfg = AW_GPIO_INT_EDGE_POSITIVE << pinidx; break; case GPIO_INTR_EDGE_FALLING: irqcfg = AW_GPIO_INT_EDGE_NEGATIVE << pinidx; break; case GPIO_INTR_EDGE_BOTH: irqcfg = AW_GPIO_INT_EDGE_BOTH << pinidx; break; } /* Switch the pin to interrupt mode */ sc->gpio_pic_irqsrc[irq].oldfunc = aw_gpio_get_function(sc, sc->gpio_pic_irqsrc[irq].pin); aw_gpio_set_function(sc, sc->gpio_pic_irqsrc[irq].pin, sc->gpio_pic_irqsrc[irq].intfunc); /* Write interrupt mode */ reg = AW_GPIO_READ(sc, AW_GPIO_GP_INT_CFG(sc->gpio_pic_irqsrc[irq].bank, sc->gpio_pic_irqsrc[irq].intnum)); reg &= ~(0xF << pinidx); reg |= irqcfg; AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_CFG(sc->gpio_pic_irqsrc[irq].bank, sc->gpio_pic_irqsrc[irq].intnum), reg); AW_GPIO_UNLOCK(sc); return (0); } static int aw_gpio_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; /* Switch the pin back to its original function */ AW_GPIO_LOCK(sc); aw_gpio_set_function(sc, gi->pin, gi->oldfunc); AW_GPIO_UNLOCK(sc); return (0); } static void aw_gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; arm_irq_memory_barrier(0); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_STA(gi->bank), 1 << gi->intnum); } static void aw_gpio_pic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; struct gpio_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gpio_irqsrc *)isrc; arm_irq_memory_barrier(0); AW_GPIO_WRITE(sc, AW_GPIO_GP_INT_STA(gi->bank), 1 << gi->intnum); aw_gpio_pic_enable_intr(dev, isrc); } static void aw_gpio_pic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct aw_gpio_softc *sc; sc = device_get_softc(dev); aw_gpio_pic_disable_intr_locked(sc, isrc); } /* * OFWBUS Interface */ static phandle_t aw_gpio_get_node(device_t dev, device_t bus) { /* We only have one child, the GPIO bus, which needs our own node.
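(Aside: the pinidx arithmetic in aw_gpio_pic_setup_intr() above follows the same packing rule as the pin-function registers: each 32-bit EINT config word covers eight interrupt inputs at four bits apiece, and AW_GPIO_GP_INT_CFG() selects the word within the bank's 0x20-byte interrupt window. A sketch of the offset math, with a hypothetical EINT number:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int bank = 0, intnum = 11;	/* hypothetical: EINT11 */
		unsigned int reg = 0x200 + 0x20 * bank + 4 * (intnum / 8);
		unsigned int shift = (intnum % 8) * 4;

		printf("INT_CFG register 0x%03x, mode field at bits %u-%u\n",
		    reg, shift, shift + 3);
		return (0);
	}
)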
*/ return (ofw_bus_get_node(dev)); } static device_method_t aw_gpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_gpio_probe), DEVMETHOD(device_attach, aw_gpio_attach), DEVMETHOD(device_detach, aw_gpio_detach), -#ifdef INTRNG /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, aw_gpio_pic_disable_intr), DEVMETHOD(pic_enable_intr, aw_gpio_pic_enable_intr), DEVMETHOD(pic_map_intr, aw_gpio_pic_map_intr), DEVMETHOD(pic_setup_intr, aw_gpio_pic_setup_intr), DEVMETHOD(pic_teardown_intr, aw_gpio_pic_teardown_intr), DEVMETHOD(pic_post_filter, aw_gpio_pic_post_filter), DEVMETHOD(pic_post_ithread, aw_gpio_pic_post_ithread), DEVMETHOD(pic_pre_ithread, aw_gpio_pic_pre_ithread), -#endif /* GPIO protocol */ DEVMETHOD(gpio_get_bus, aw_gpio_get_bus), DEVMETHOD(gpio_pin_max, aw_gpio_pin_max), DEVMETHOD(gpio_pin_getname, aw_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, aw_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, aw_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, aw_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, aw_gpio_pin_get), DEVMETHOD(gpio_pin_set, aw_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, aw_gpio_pin_toggle), DEVMETHOD(gpio_pin_access_32, aw_gpio_pin_access_32), DEVMETHOD(gpio_pin_config_32, aw_gpio_pin_config_32), DEVMETHOD(gpio_map_gpios, aw_gpio_map_gpios), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, aw_gpio_get_node), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure,aw_fdt_configure_pins), DEVMETHOD_END }; static devclass_t aw_gpio_devclass; static driver_t aw_gpio_driver = { "gpio", aw_gpio_methods, sizeof(struct aw_gpio_softc), }; EARLY_DRIVER_MODULE(aw_gpio, simplebus, aw_gpio_driver, aw_gpio_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); Index: head/sys/arm/allwinner/files.allwinner =================================================================== --- head/sys/arm/allwinner/files.allwinner (revision 368140) +++ head/sys/arm/allwinner/files.allwinner (revision 368141) @@ -1,47 +1,47 @@ # $FreeBSD$ arm/allwinner/a10_ahci.c optional ahci arm/allwinner/a10_codec.c optional sound arm/allwinner/a10_dmac.c optional a10_dmac arm/allwinner/a31_dmac.c optional a31_dmac arm/allwinner/a10_sramc.c optional SOC_ALLWINNER_A10 arm/allwinner/aw_gpio.c optional gpio arm/allwinner/aw_if_dwc.c optional dwc arm/allwinner/aw_machdep.c standard arm/allwinner/aw_mmc.c optional mmc | mmccam arm/allwinner/aw_mp.c optional smp -arm/allwinner/aw_nmi.c optional intrng +arm/allwinner/aw_nmi.c standard arm/allwinner/aw_rsb.c optional rsb | p2wi arm/allwinner/aw_rtc.c optional aw_rtc arm/allwinner/aw_syscon.c optional ext_resources syscon arm/allwinner/aw_ts.c optional aw_thermal arm/allwinner/aw_usbphy.c optional ehci | ohci arm/allwinner/aw_wdog.c optional aw_wdog arm/allwinner/axp209.c optional axp209 arm/allwinner/axp81x.c optional axp81x arm/allwinner/if_awg.c optional awg ext_resources syscon arm/allwinner/if_emac.c optional emac arm/allwinner/sunxi_dma_if.m optional a10_dmac | a31_dmac dev/iicbus/twsi/a10_twsi.c optional twsi dev/usb/controller/generic_ohci.c optional ohci dev/usb/controller/generic_usb_if.m optional ohci dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_fdt.c optional ehci dev/usb/controller/musb_otg_allwinner.c optional musb arm/allwinner/aw_sid.c optional aw_sid arm/allwinner/aw_thermal.c optional aw_thermal arm/allwinner/aw_cir.c optional aw_cir evdev arm/allwinner/aw_reset.c standard arm/allwinner/aw_ccu.c standard arm/allwinner/aw_gmacclk.c standard arm/allwinner/clkng/aw_ccung.c standard 
arm/allwinner/clkng/aw_clk_frac.c standard arm/allwinner/clkng/aw_clk_m.c standard arm/allwinner/clkng/aw_clk_mipi.c standard arm/allwinner/clkng/aw_clk_nkmp.c standard arm/allwinner/clkng/aw_clk_nm.c standard arm/allwinner/clkng/aw_clk_np.c standard arm/allwinner/clkng/aw_clk_nmm.c standard arm/allwinner/clkng/aw_clk_prediv_mux.c standard Index: head/sys/arm/annapurna/alpine/alpine_common.c =================================================================== --- head/sys/arm/annapurna/alpine/alpine_common.c (revision 368140) +++ head/sys/arm/annapurna/alpine/alpine_common.c (nonexistent) @@ -1,72 +0,0 @@ -/*- - * Copyright (c) 2013 Ruslan Bukin - * Copyright (c) 2015 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_platform.h" - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#ifndef INTRNG -static int alpine_pic_decode_fdt(uint32_t iparent, uint32_t *intr, - int *interrupt, int *trig, int *pol); - -static int -alpine_pic_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt, - int *trig, int *pol) -{ - int rv = 0; - - rv = gic_decode_fdt(iparent, intr, interrupt, trig, pol); - if (rv == 0) { - /* This was recognized as our PIC and decoded. 
*/ - interrupt = FDT_MAP_IRQ(iparent, interrupt); - - /* Configure the interrupt if callback provided */ - if (arm_config_irq) - (*arm_config_irq)(*interrupt, *trig, *pol); - } - return (rv); -} - -fdt_pic_decode_t fdt_pic_table[] = { - &alpine_pic_decode_fdt, - NULL -}; -#endif Property changes on: head/sys/arm/annapurna/alpine/alpine_common.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/arm/annapurna/alpine/files.alpine =================================================================== --- head/sys/arm/annapurna/alpine/files.alpine (revision 368140) +++ head/sys/arm/annapurna/alpine/files.alpine (revision 368141) @@ -1,8 +1,7 @@ # $FreeBSD$ arm/versatile/sp804.c standard dev/uart/uart_dev_ns8250.c optional uart -arm/annapurna/alpine/alpine_common.c standard arm/annapurna/alpine/alpine_machdep.c standard arm/annapurna/alpine/alpine_machdep_mp.c optional smp Index: head/sys/arm/arm/bcopyinout.S =================================================================== --- head/sys/arm/arm/bcopyinout.S (revision 368140) +++ head/sys/arm/arm/bcopyinout.S (revision 368141) @@ -1,641 +1,632 @@ /* $NetBSD: bcopyinout.S,v 1.11 2003/10/13 21:22:40 scw Exp $ */ /*- * Copyright (c) 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "assym.inc" #include #include .L_arm_memcpy: .word _C_LABEL(_arm_memcpy) .L_min_memcpy_size: .word _C_LABEL(_min_memcpy_size) __FBSDID("$FreeBSD$"); #ifdef _ARM_ARCH_5E #include #else .text .align 2 -#if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) -#else -.Lcurpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB - -#define GET_PCB(tmp) \ - ldr tmp, .Lcurpcb -#endif - #define SAVE_REGS stmfd sp!, {r4-r11}; _SAVE({r4-r11}) #define RESTORE_REGS ldmfd sp!, {r4-r11} #if defined(_ARM_ARCH_5E) #define HELLOCPP # #define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ] #else #define PREFETCH(rx,o) #endif /* * r0 = user space address * r1 = kernel space address * r2 = length * * Copies bytes from user space to kernel space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyin) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq adds r3, r0, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormal ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormal stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #2 /* SRC_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormal: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Licleanup /* * Align destination to word boundary. */ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lialend .word .Lialend .word .Lial3 .word .Lial2 .word .Lial1 .Lial3: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lial2: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lial1: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lialend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Licleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Licleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Licleanup8 /* * Align destination to cacheline boundary. * If source and destination are nicely aligned, this can be a big * win. If not, it's still cheaper to copy in groups of 32 even if * we don't get the nice cacheline alignment. */ and r6, r1, #0x1f ldr pc, [pc, r6] b .Licaligned .word .Licaligned .word .Lical28 .word .Lical24 .word .Lical20 .word .Lical16 .word .Lical12 .word .Lical8 .word .Lical4 .Lical28:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical24:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical20:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical16:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical12:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical8:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical4:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). 
* * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Licaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x40 bge .Licaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x08 blt .Liprecleanup .Licleanup8: ldrt r8, [r0], #4 ldrt r9, [r0], #4 sub r2, r2, #8 stmia r1!, {r8, r9} cmp r2, #8 bge .Licleanup8 .Liprecleanup: /* * If we're done, bail. */ cmp r2, #0 beq .Lout .Licleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Licend .word .Lic4 .word .Lic1 .word .Lic2 .word .Lic3 .Lic4: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic3: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lic2: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic1: ldrbt r7, [r0], #1 subs r2, r2, #1 strb r7, [r1], #1 .Licend: bne .Licleanup .Liout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET .Lcopyfault: ldr r0, =EFAULT str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyin) /* * r0 = kernel space address * r1 = user space address * r2 = length * * Copies bytes from kernel space to user space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyout) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq adds r3, r1, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormale ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormale stmfd sp!, {r0-r2, r4, lr} _SAVE({r0-r2, r4, lr}) mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #1 /* DST_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormale: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Lcleanup /* * Align destination to word boundary. */ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lalend .word .Lalend .word .Lal3 .word .Lal2 .word .Lal1 .Lal3: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lal2: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lal1: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lalend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Lcleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Lcleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Lcleanup8 /* * Align source & destination to cacheline boundary. 
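(Aside: a C model of the cacheline loop that the "This loop basically works out to" comments describe, shared by the copyin and copyout paths. This is a sketch only; the real code is the unrolled ldrt/strt assembly with prefetching, and the loop is entered only with at least 0x40 bytes remaining:

	#include <stddef.h>
	#include <string.h>

	static void
	cacheline_loop(char *dst, const char *src, size_t *bytes)
	{
		/* Assumes *bytes >= 0x40 on entry, as the assembly does. */
		do {
			/* prefetch of the next cacheline(s) would go here */
			*bytes -= 0x20;
			memcpy(dst, src, 0x20);		/* "copy cacheline" */
			dst += 0x20;
			src += 0x20;
		} while (*bytes >= 0x40);
		*bytes -= 0x20;
		memcpy(dst, src, 0x20);			/* final cacheline */
		/* fewer than 0x20 bytes now remain for the cleanup paths */
	}
)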
*/ and r6, r1, #0x1f ldr pc, [pc, r6] b .Lcaligned .word .Lcaligned .word .Lcal28 .word .Lcal24 .word .Lcal20 .word .Lcal16 .word .Lcal12 .word .Lcal8 .word .Lcal4 .Lcal28:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal24:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal20:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal16:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal12:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal8: ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal4: ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). * * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Lcaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x40 bge .Lcaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x08 blt .Lprecleanup .Lcleanup8: ldmia r0!, {r8-r9} sub r2, r2, #8 strt r8, [r1], #4 strt r9, [r1], #4 cmp r2, #8 bge .Lcleanup8 .Lprecleanup: /* * If we're done, bail. */ cmp r2, #0 beq .Lout .Lcleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Lcend .word .Lc4 .word .Lc1 .word .Lc2 .word .Lc3 .Lc4: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc3: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lc2: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc1: ldrb r7, [r0], #1 subs r2, r2, #1 strbt r7, [r1], #1 .Lcend: bne .Lcleanup .Lout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyout) #endif /* * int badaddr_read_1(const uint8_t *src, uint8_t *dest) * * Copies a single 8-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_1) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrb r3, [r0] nop nop nop strb r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_1) /* * int badaddr_read_2(const uint16_t *src, uint16_t *dest) * * Copies a single 16-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_2) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrh r3, [r0] nop nop nop strh r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_2) /* * int badaddr_read_4(const uint32_t *src, uint32_t *dest) * * Copies a single 32-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. 
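(Aside: all three badaddr_read_* routines below use the same recovery pattern: install a local handler address in pcb_onfault, perform the possibly-faulting access, then restore the old handler; if the access faults, the trap code transfers control to the saved label and EFAULT is returned. Roughly the following, modeled in userland C with setjmp/longjmp standing in for the pcb_onfault mechanism (a sketch, not the kernel API):

	#include <errno.h>
	#include <setjmp.h>
	#include <stdint.h>

	static jmp_buf onfault;		/* stands in for pcb->pcb_onfault */

	/* A real fault handler would longjmp(onfault, 1) on a bad access. */
	static int
	badaddr_read_4_model(const volatile uint32_t *src, uint32_t *dest)
	{
		if (setjmp(onfault) != 0)
			return (EFAULT);	/* reached via the fault handler */
		*dest = *src;			/* the possibly-faulting load */
		return (0);			/* no fault */
	}
)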
*/ ENTRY(badaddr_read_4) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldr r3, [r0] nop nop nop str r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_4) Index: head/sys/arm/arm/bcopyinout_xscale.S =================================================================== --- head/sys/arm/arm/bcopyinout_xscale.S (revision 368140) +++ head/sys/arm/arm/bcopyinout_xscale.S (revision 368141) @@ -1,958 +1,951 @@ /* $NetBSD: bcopyinout_xscale.S,v 1.3 2003/12/15 09:27:18 scw Exp $ */ /*- * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); .syntax unified .text .align 2 -#if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) -#else -.Lcurpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB -#define GET_PCB(tmp) \ - ldr tmp, .Lcurpcb -#endif /* * r0 = user space address * r1 = kernel space address * r2 = length * * Copies bytes from user space to kernel space */ ENTRY(copyin) cmp r2, #0x00 movle r0, #0x00 movle pc, lr /* Bail early if length is <= 0 */ adds r3, r0, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormal ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormal stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #2 /* SRC_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormal: stmfd sp!, {r10-r11, lr} GET_PCB(r10) ldr r10, [r10] mov r3, #0x00 adr ip, .Lcopyin_fault ldr r11, [r10, #PCB_ONFAULT] str ip, [r10, #PCB_ONFAULT] bl .Lcopyin_guts str r11, [r10, #PCB_ONFAULT] mov r0, #0x00 ldmfd sp!, {r10-r11, pc} .Lcopyin_fault: ldr r0, =EFAULT str r11, [r10, #PCB_ONFAULT] cmp r3, #0x00 ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ ldmfd sp!, {r10-r11, pc} .Lcopyin_guts: pld [r0] /* Word-align the destination buffer */ ands ip, r1, #0x03 /* Already word aligned? */ beq .Lcopyin_wordaligned /* Yup */ rsb ip, ip, #0x04 cmp r2, ip /* Enough bytes left to align it? */ blt .Lcopyin_l4_2 /* Nope. Just copy bytewise */ sub r2, r2, ip rsbs ip, ip, #0x03 addne pc, pc, ip, lsl #3 nop ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 cmp r2, #0x00 /* All done? */ RETeq /* Destination buffer is now word aligned */ .Lcopyin_wordaligned: ands ip, r0, #0x03 /* Is src also word-aligned? */ bne .Lcopyin_bad_align /* Nope. Things just got bad */ cmp r2, #0x08 /* Less than 8 bytes remaining? */ blt .Lcopyin_w_less_than8 /* Quad-align the destination buffer */ tst r1, #0x07 /* Already quad aligned? 
*/ ldrtne ip, [r0], #0x04 strne ip, [r1], #0x04 subne r2, r2, #0x04 stmfd sp!, {r4-r9} /* Free up some registers */ mov r3, #-1 /* Signal restore r4-r9 */ /* Destination buffer quad aligned, source is word aligned */ subs r2, r2, #0x80 blt .Lcopyin_w_lessthan128 /* Copy 128 bytes at a time */ .Lcopyin_w_loop128: ldrt r4, [r0], #0x04 /* LD:00-03 */ ldrt r5, [r0], #0x04 /* LD:04-07 */ pld [r0, #0x18] /* Prefetch 0x20 */ ldrt r6, [r0], #0x04 /* LD:08-0b */ ldrt r7, [r0], #0x04 /* LD:0c-0f */ ldrt r8, [r0], #0x04 /* LD:10-13 */ ldrt r9, [r0], #0x04 /* LD:14-17 */ strd r4, [r1], #0x08 /* ST:00-07 */ ldrt r4, [r0], #0x04 /* LD:18-1b */ ldrt r5, [r0], #0x04 /* LD:1c-1f */ strd r6, [r1], #0x08 /* ST:08-0f */ ldrt r6, [r0], #0x04 /* LD:20-23 */ ldrt r7, [r0], #0x04 /* LD:24-27 */ pld [r0, #0x18] /* Prefetch 0x40 */ strd r8, [r1], #0x08 /* ST:10-17 */ ldrt r8, [r0], #0x04 /* LD:28-2b */ ldrt r9, [r0], #0x04 /* LD:2c-2f */ strd r4, [r1], #0x08 /* ST:18-1f */ ldrt r4, [r0], #0x04 /* LD:30-33 */ ldrt r5, [r0], #0x04 /* LD:34-37 */ strd r6, [r1], #0x08 /* ST:20-27 */ ldrt r6, [r0], #0x04 /* LD:38-3b */ ldrt r7, [r0], #0x04 /* LD:3c-3f */ strd r8, [r1], #0x08 /* ST:28-2f */ ldrt r8, [r0], #0x04 /* LD:40-43 */ ldrt r9, [r0], #0x04 /* LD:44-47 */ pld [r0, #0x18] /* Prefetch 0x60 */ strd r4, [r1], #0x08 /* ST:30-37 */ ldrt r4, [r0], #0x04 /* LD:48-4b */ ldrt r5, [r0], #0x04 /* LD:4c-4f */ strd r6, [r1], #0x08 /* ST:38-3f */ ldrt r6, [r0], #0x04 /* LD:50-53 */ ldrt r7, [r0], #0x04 /* LD:54-57 */ strd r8, [r1], #0x08 /* ST:40-47 */ ldrt r8, [r0], #0x04 /* LD:58-5b */ ldrt r9, [r0], #0x04 /* LD:5c-5f */ strd r4, [r1], #0x08 /* ST:48-4f */ ldrt r4, [r0], #0x04 /* LD:60-63 */ ldrt r5, [r0], #0x04 /* LD:64-67 */ pld [r0, #0x18] /* Prefetch 0x80 */ strd r6, [r1], #0x08 /* ST:50-57 */ ldrt r6, [r0], #0x04 /* LD:68-6b */ ldrt r7, [r0], #0x04 /* LD:6c-6f */ strd r8, [r1], #0x08 /* ST:58-5f */ ldrt r8, [r0], #0x04 /* LD:70-73 */ ldrt r9, [r0], #0x04 /* LD:74-77 */ strd r4, [r1], #0x08 /* ST:60-67 */ ldrt r4, [r0], #0x04 /* LD:78-7b */ ldrt r5, [r0], #0x04 /* LD:7c-7f */ strd r6, [r1], #0x08 /* ST:68-6f */ strd r8, [r1], #0x08 /* ST:70-77 */ subs r2, r2, #0x80 strd r4, [r1], #0x08 /* ST:78-7f */ bge .Lcopyin_w_loop128 .Lcopyin_w_lessthan128: adds r2, r2, #0x80 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq subs r2, r2, #0x20 blt .Lcopyin_w_lessthan32 /* Copy 32 bytes at a time */ .Lcopyin_w_loop32: ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 pld [r0, #0x18] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt r8, [r0], #0x04 ldrt r9, [r0], #0x04 strd r4, [r1], #0x08 ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 strd r6, [r1], #0x08 strd r8, [r1], #0x08 subs r2, r2, #0x20 strd r4, [r1], #0x08 bge .Lcopyin_w_loop32 .Lcopyin_w_lessthan32: adds r2, r2, #0x20 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ and r4, r2, #0x18 rsb r5, r4, #0x18 subs r2, r2, r4 add pc, pc, r5, lsl #1 nop /* At least 24 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* At least 16 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* At least 8 bytes remaining */ ldrt r4, [r0], #0x04 ldrt r5, [r0], #0x04 nop strd r4, [r1], #0x08 /* Less than 8 bytes remaining */ ldmfd sp!, {r4-r9} RETeq /* Return now if done */ mov r3, #0x00 .Lcopyin_w_less_than8: subs r2, r2, #0x04 ldrtge ip, [r0], #0x04 strge ip, [r1], #0x04 RETeq /* Return now if done */ addlt r2, r2, #0x04 ldrbt ip, [r0], #0x01 cmp r2, #0x02 ldrbtge r2, [r0], #0x01 strb ip, [r1], #0x01 ldrbtgt 
ip, [r0] strbge r2, [r1], #0x01 strbgt ip, [r1] RET /* * At this point, it has not been possible to word align both buffers. * The destination buffer (r1) is word aligned, but the source buffer * (r0) is not. */ .Lcopyin_bad_align: stmfd sp!, {r4-r7} mov r3, #0x01 bic r0, r0, #0x03 cmp ip, #2 ldrt ip, [r0], #0x04 bgt .Lcopyin_bad3 beq .Lcopyin_bad2 b .Lcopyin_bad1 .Lcopyin_bad1_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #24 mov r5, r5, lsl #8 orr r5, r5, r6, lsr #24 mov r6, r6, lsl #8 orr r6, r6, r7, lsr #24 mov r7, r7, lsl #8 orr r7, r7, ip, lsr #24 #else orr r4, r4, r5, lsl #24 mov r5, r5, lsr #8 orr r5, r5, r6, lsl #24 mov r6, r6, lsr #8 orr r6, r6, r7, lsl #24 mov r7, r7, lsr #8 orr r7, r7, ip, lsl #24 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad1: subs r2, r2, #0x10 bge .Lcopyin_bad1_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x03 blt .Lcopyin_l4 .Lcopyin_bad1_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #24 #else orr r4, r4, ip, lsl #24 #endif str r4, [r1], #0x04 bge .Lcopyin_bad1_loop4 sub r0, r0, #0x03 b .Lcopyin_l4 .Lcopyin_bad2_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #16 mov r5, r5, lsl #16 orr r5, r5, r6, lsr #16 mov r6, r6, lsl #16 orr r6, r6, r7, lsr #16 mov r7, r7, lsl #16 orr r7, r7, ip, lsr #16 #else orr r4, r4, r5, lsl #16 mov r5, r5, lsr #16 orr r5, r5, r6, lsl #16 mov r6, r6, lsr #16 orr r6, r6, r7, lsl #16 mov r7, r7, lsr #16 orr r7, r7, ip, lsl #16 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad2: subs r2, r2, #0x10 bge .Lcopyin_bad2_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x02 blt .Lcopyin_l4 .Lcopyin_bad2_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #16 #else orr r4, r4, ip, lsl #16 #endif str r4, [r1], #0x04 bge .Lcopyin_bad2_loop4 sub r0, r0, #0x02 b .Lcopyin_l4 .Lcopyin_bad3_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldrt r5, [r0], #0x04 pld [r0, #0x018] ldrt r6, [r0], #0x04 ldrt r7, [r0], #0x04 ldrt ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #8 mov r5, r5, lsl #24 orr r5, r5, r6, lsr #8 mov r6, r6, lsl #24 orr r6, r6, r7, lsr #8 mov r7, r7, lsl #24 orr r7, r7, ip, lsr #8 #else orr r4, r4, r5, lsl #8 mov r5, r5, lsr #24 orr r5, r5, r6, lsl #8 mov r6, r6, lsr #24 orr r6, r6, r7, lsl #8 mov r7, r7, lsr #24 orr r7, r7, ip, lsl #8 #endif str r4, [r1], #0x04 str r5, [r1], #0x04 str r6, [r1], #0x04 str r7, [r1], #0x04 .Lcopyin_bad3: subs r2, r2, #0x10 bge .Lcopyin_bad3_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x01 blt .Lcopyin_l4 .Lcopyin_bad3_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldrt ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #8 #else orr r4, r4, ip, lsl #8 #endif str r4, [r1], #0x04 bge 
.Lcopyin_bad3_loop4 sub r0, r0, #0x01 .Lcopyin_l4: ldmfd sp!, {r4-r7} mov r3, #0x00 adds r2, r2, #0x04 RETeq .Lcopyin_l4_2: rsbs r2, r2, #0x03 addne pc, pc, r2, lsl #3 nop ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0], #0x01 strb ip, [r1], #0x01 ldrbt ip, [r0] strb ip, [r1] RET END(copyin) /* * r0 = kernel space address * r1 = user space address * r2 = length * * Copies bytes from kernel space to user space */ ENTRY(copyout) cmp r2, #0x00 movle r0, #0x00 movle pc, lr /* Bail early if length is <= 0 */ adds r3, r1, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormale ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormale stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #1 /* DST_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormale: stmfd sp!, {r10-r11, lr} GET_PCB(r10) ldr r10, [r10] mov r3, #0x00 adr ip, .Lcopyout_fault ldr r11, [r10, #PCB_ONFAULT] str ip, [r10, #PCB_ONFAULT] bl .Lcopyout_guts str r11, [r10, #PCB_ONFAULT] mov r0, #0x00 ldmfd sp!, {r10-r11, pc} .Lcopyout_fault: ldr r0, =EFAULT str r11, [r10, #PCB_ONFAULT] cmp r3, #0x00 ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ ldmfd sp!, {r10-r11, pc} .Lcopyout_guts: pld [r0] /* Word-align the destination buffer */ ands ip, r1, #0x03 /* Already word aligned? */ beq .Lcopyout_wordaligned /* Yup */ rsb ip, ip, #0x04 cmp r2, ip /* Enough bytes left to align it? */ blt .Lcopyout_l4_2 /* Nope. Just copy bytewise */ sub r2, r2, ip rsbs ip, ip, #0x03 addne pc, pc, ip, lsl #3 nop ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 cmp r2, #0x00 /* All done? */ RETeq /* Destination buffer is now word aligned */ .Lcopyout_wordaligned: ands ip, r0, #0x03 /* Is src also word-aligned? */ bne .Lcopyout_bad_align /* Nope. Things just got bad */ cmp r2, #0x08 /* Less than 8 bytes remaining? */ blt .Lcopyout_w_less_than8 /* Quad-align the destination buffer */ tst r0, #0x07 /* Already quad aligned? 
*/ ldrne ip, [r0], #0x04 subne r2, r2, #0x04 strtne ip, [r1], #0x04 stmfd sp!, {r4-r9} /* Free up some registers */ mov r3, #-1 /* Signal restore r4-r9 */ /* Destination buffer word aligned, source is quad aligned */ subs r2, r2, #0x80 blt .Lcopyout_w_lessthan128 /* Copy 128 bytes at a time */ .Lcopyout_w_loop128: ldrd r4, [r0], #0x08 /* LD:00-07 */ pld [r0, #0x18] /* Prefetch 0x20 */ ldrd r6, [r0], #0x08 /* LD:08-0f */ ldrd r8, [r0], #0x08 /* LD:10-17 */ strt r4, [r1], #0x04 /* ST:00-03 */ strt r5, [r1], #0x04 /* ST:04-07 */ ldrd r4, [r0], #0x08 /* LD:18-1f */ strt r6, [r1], #0x04 /* ST:08-0b */ strt r7, [r1], #0x04 /* ST:0c-0f */ ldrd r6, [r0], #0x08 /* LD:20-27 */ pld [r0, #0x18] /* Prefetch 0x40 */ strt r8, [r1], #0x04 /* ST:10-13 */ strt r9, [r1], #0x04 /* ST:14-17 */ ldrd r8, [r0], #0x08 /* LD:28-2f */ strt r4, [r1], #0x04 /* ST:18-1b */ strt r5, [r1], #0x04 /* ST:1c-1f */ ldrd r4, [r0], #0x08 /* LD:30-37 */ strt r6, [r1], #0x04 /* ST:20-23 */ strt r7, [r1], #0x04 /* ST:24-27 */ ldrd r6, [r0], #0x08 /* LD:38-3f */ strt r8, [r1], #0x04 /* ST:28-2b */ strt r9, [r1], #0x04 /* ST:2c-2f */ ldrd r8, [r0], #0x08 /* LD:40-47 */ pld [r0, #0x18] /* Prefetch 0x60 */ strt r4, [r1], #0x04 /* ST:30-33 */ strt r5, [r1], #0x04 /* ST:34-37 */ ldrd r4, [r0], #0x08 /* LD:48-4f */ strt r6, [r1], #0x04 /* ST:38-3b */ strt r7, [r1], #0x04 /* ST:3c-3f */ ldrd r6, [r0], #0x08 /* LD:50-57 */ strt r8, [r1], #0x04 /* ST:40-43 */ strt r9, [r1], #0x04 /* ST:44-47 */ ldrd r8, [r0], #0x08 /* LD:58-5f */ strt r4, [r1], #0x04 /* ST:48-4b */ strt r5, [r1], #0x04 /* ST:4c-4f */ ldrd r4, [r0], #0x08 /* LD:60-67 */ pld [r0, #0x18] /* Prefetch 0x80 */ strt r6, [r1], #0x04 /* ST:50-53 */ strt r7, [r1], #0x04 /* ST:54-57 */ ldrd r6, [r0], #0x08 /* LD:68-6f */ strt r8, [r1], #0x04 /* ST:58-5b */ strt r9, [r1], #0x04 /* ST:5c-5f */ ldrd r8, [r0], #0x08 /* LD:70-77 */ strt r4, [r1], #0x04 /* ST:60-63 */ strt r5, [r1], #0x04 /* ST:64-67 */ ldrd r4, [r0], #0x08 /* LD:78-7f */ strt r6, [r1], #0x04 /* ST:68-6b */ strt r7, [r1], #0x04 /* ST:6c-6f */ strt r8, [r1], #0x04 /* ST:70-73 */ strt r9, [r1], #0x04 /* ST:74-77 */ subs r2, r2, #0x80 strt r4, [r1], #0x04 /* ST:78-7b */ strt r5, [r1], #0x04 /* ST:7c-7f */ bge .Lcopyout_w_loop128 .Lcopyout_w_lessthan128: adds r2, r2, #0x80 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ subs r2, r2, #0x20 blt .Lcopyout_w_lessthan32 /* Copy 32 bytes at a time */ .Lcopyout_w_loop32: ldrd r4, [r0], #0x08 pld [r0, #0x18] ldrd r6, [r0], #0x08 ldrd r8, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 ldrd r4, [r0], #0x08 strt r6, [r1], #0x04 strt r7, [r1], #0x04 strt r8, [r1], #0x04 strt r9, [r1], #0x04 subs r2, r2, #0x20 strt r4, [r1], #0x04 strt r5, [r1], #0x04 bge .Lcopyout_w_loop32 .Lcopyout_w_lessthan32: adds r2, r2, #0x20 /* Adjust for extra sub */ ldmfdeq sp!, {r4-r9} RETeq /* Return now if done */ and r4, r2, #0x18 rsb r5, r4, #0x18 subs r2, r2, r4 add pc, pc, r5, lsl #1 nop /* At least 24 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* At least 16 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* At least 8 bytes remaining */ ldrd r4, [r0], #0x08 strt r4, [r1], #0x04 strt r5, [r1], #0x04 nop /* Less than 8 bytes remaining */ ldmfd sp!, {r4-r9} RETeq /* Return now if done */ mov r3, #0x00 .Lcopyout_w_less_than8: subs r2, r2, #0x04 ldrge ip, [r0], #0x04 strtge ip, [r1], #0x04 RETeq /* Return now if done */ addlt r2, r2, #0x04 ldrb ip, [r0], #0x01 cmp r2, #0x02 ldrbge r2, [r0],
#0x01 strbt ip, [r1], #0x01 ldrbgt ip, [r0] strbtge r2, [r1], #0x01 strbtgt ip, [r1] RET /* * At this point, it has not been possible to word align both buffers. * The destination buffer (r1) is word aligned, but the source buffer * (r0) is not. */ .Lcopyout_bad_align: stmfd sp!, {r4-r7} mov r3, #0x01 bic r0, r0, #0x03 cmp ip, #2 ldr ip, [r0], #0x04 bgt .Lcopyout_bad3 beq .Lcopyout_bad2 b .Lcopyout_bad1 .Lcopyout_bad1_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #24 mov r5, r5, lsl #8 orr r5, r5, r6, lsr #24 mov r6, r6, lsl #8 orr r6, r6, r7, lsr #24 mov r7, r7, lsl #8 orr r7, r7, ip, lsr #24 #else orr r4, r4, r5, lsl #24 mov r5, r5, lsr #8 orr r5, r5, r6, lsl #24 mov r6, r6, lsr #8 orr r6, r6, r7, lsl #24 mov r7, r7, lsr #8 orr r7, r7, ip, lsl #24 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad1: subs r2, r2, #0x10 bge .Lcopyout_bad1_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x03 blt .Lcopyout_l4 .Lcopyout_bad1_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #8 #else mov r4, ip, lsr #8 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #24 #else orr r4, r4, ip, lsl #24 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad1_loop4 sub r0, r0, #0x03 b .Lcopyout_l4 .Lcopyout_bad2_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #16 mov r5, r5, lsl #16 orr r5, r5, r6, lsr #16 mov r6, r6, lsl #16 orr r6, r6, r7, lsr #16 mov r7, r7, lsl #16 orr r7, r7, ip, lsr #16 #else orr r4, r4, r5, lsl #16 mov r5, r5, lsr #16 orr r5, r5, r6, lsl #16 mov r6, r6, lsr #16 orr r6, r6, r7, lsl #16 mov r7, r7, lsr #16 orr r7, r7, ip, lsl #16 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad2: subs r2, r2, #0x10 bge .Lcopyout_bad2_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x02 blt .Lcopyout_l4 .Lcopyout_bad2_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #16 #else mov r4, ip, lsr #16 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #16 #else orr r4, r4, ip, lsl #16 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad2_loop4 sub r0, r0, #0x02 b .Lcopyout_l4 .Lcopyout_bad3_loop16: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldr r5, [r0], #0x04 pld [r0, #0x018] ldr r6, [r0], #0x04 ldr r7, [r0], #0x04 ldr ip, [r0], #0x04 #ifdef __ARMEB__ orr r4, r4, r5, lsr #8 mov r5, r5, lsl #24 orr r5, r5, r6, lsr #8 mov r6, r6, lsl #24 orr r6, r6, r7, lsr #8 mov r7, r7, lsl #24 orr r7, r7, ip, lsr #8 #else orr r4, r4, r5, lsl #8 mov r5, r5, lsr #24 orr r5, r5, r6, lsl #8 mov r6, r6, lsr #24 orr r6, r6, r7, lsl #8 mov r7, r7, lsr #24 orr r7, r7, ip, lsl #8 #endif strt r4, [r1], #0x04 strt r5, [r1], #0x04 strt r6, [r1], #0x04 strt r7, [r1], #0x04 .Lcopyout_bad3: subs r2, r2, #0x10 bge .Lcopyout_bad3_loop16 adds r2, r2, #0x10 ldmfdeq sp!, {r4-r7} RETeq /* Return now if done */ subs r2, r2, #0x04 sublt r0, r0, #0x01 blt .Lcopyout_l4 .Lcopyout_bad3_loop4: #ifdef __ARMEB__ mov r4, ip, lsl #24 #else mov r4, ip, lsr #24 #endif ldr ip, [r0], #0x04 subs r2, r2, #0x04 #ifdef __ARMEB__ orr r4, r4, ip, lsr #8 #else 
orr r4, r4, ip, lsl #8 #endif strt r4, [r1], #0x04 bge .Lcopyout_bad3_loop4 sub r0, r0, #0x01 .Lcopyout_l4: ldmfd sp!, {r4-r7} mov r3, #0x00 adds r2, r2, #0x04 RETeq .Lcopyout_l4_2: rsbs r2, r2, #0x03 addne pc, pc, r2, lsl #3 nop ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0], #0x01 strbt ip, [r1], #0x01 ldrb ip, [r0] strbt ip, [r1] RET END(copyout) Index: head/sys/arm/arm/bus_space_base.c =================================================================== --- head/sys/arm/arm/bus_space_base.c (revision 368140) +++ head/sys/arm/arm/bus_space_base.c (revision 368141) @@ -1,161 +1,158 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "opt_platform.h" /* Prototypes for all the bus_space structure functions */ bs_protos(generic); /* * The bus space tag. This is constant for all instances, so * we never have to explicitly "create" it. 
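 *
 * As an illustrative sketch only (the device address, size, and register
 * offset below are hypothetical, not part of this change), a driver
 * typically consumes such a tag roughly as follows:
 */
#if 0	/* example only, never compiled */
static int
example_map_and_poke(void)
{
	bus_space_handle_t bsh;
	uint32_t reg;

	/* Map 0x400 bytes of hypothetical device registers. */
	if (bus_space_map(fdtbus_bs_tag, 0x01c20800, 0x400, 0, &bsh) != 0)
		return (ENXIO);
	/* Read-modify-write a hypothetical control register at 0x10. */
	reg = bus_space_read_4(fdtbus_bs_tag, bsh, 0x10);
	bus_space_write_4(fdtbus_bs_tag, bsh, 0x10, reg | 1);
	bus_space_unmap(fdtbus_bs_tag, bsh, 0x400);
	return (0);
}
#endif
/* The tag itself: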
*/ static struct bus_space arm_base_bus_space __aligned(CACHE_LINE_SIZE) = { /* privdata is whatever the implementer wants; unused in base tag */ .bs_privdata = NULL, /* mapping/unmapping */ .bs_map = generic_bs_map, .bs_unmap = generic_bs_unmap, .bs_subregion = generic_bs_subregion, /* allocation/deallocation */ .bs_alloc = generic_bs_alloc, .bs_free = generic_bs_free, /* barrier */ .bs_barrier = generic_bs_barrier, /* read (single) */ .bs_r_1 = NULL, /* Use inline code in bus.h */ .bs_r_2 = NULL, /* Use inline code in bus.h */ .bs_r_4 = NULL, /* Use inline code in bus.h */ .bs_r_8 = NULL, /* Use inline code in bus.h */ /* read multiple */ .bs_rm_1 = generic_bs_rm_1, .bs_rm_2 = generic_bs_rm_2, .bs_rm_4 = generic_bs_rm_4, .bs_rm_8 = BS_UNIMPLEMENTED, /* read region */ .bs_rr_1 = generic_bs_rr_1, .bs_rr_2 = generic_bs_rr_2, .bs_rr_4 = generic_bs_rr_4, .bs_rr_8 = BS_UNIMPLEMENTED, /* write (single) */ .bs_w_1 = NULL, /* Use inline code in bus.h */ .bs_w_2 = NULL, /* Use inline code in bus.h */ .bs_w_4 = NULL, /* Use inline code in bus.h */ .bs_w_8 = NULL, /* Use inline code in bus.h */ /* write multiple */ .bs_wm_1 = generic_bs_wm_1, .bs_wm_2 = generic_bs_wm_2, .bs_wm_4 = generic_bs_wm_4, .bs_wm_8 = BS_UNIMPLEMENTED, /* write region */ .bs_wr_1 = generic_bs_wr_1, .bs_wr_2 = generic_bs_wr_2, .bs_wr_4 = generic_bs_wr_4, .bs_wr_8 = BS_UNIMPLEMENTED, /* set multiple */ .bs_sm_1 = BS_UNIMPLEMENTED, .bs_sm_2 = BS_UNIMPLEMENTED, .bs_sm_4 = BS_UNIMPLEMENTED, .bs_sm_8 = BS_UNIMPLEMENTED, /* set region */ .bs_sr_1 = generic_bs_sr_1, .bs_sr_2 = generic_bs_sr_2, .bs_sr_4 = generic_bs_sr_4, .bs_sr_8 = BS_UNIMPLEMENTED, /* copy */ .bs_c_1 = BS_UNIMPLEMENTED, .bs_c_2 = generic_bs_c_2, .bs_c_4 = BS_UNIMPLEMENTED, .bs_c_8 = BS_UNIMPLEMENTED, /* read stream (single) */ .bs_r_1_s = NULL, /* Use inline code in bus.h */ .bs_r_2_s = NULL, /* Use inline code in bus.h */ .bs_r_4_s = NULL, /* Use inline code in bus.h */ .bs_r_8_s = NULL, /* Use inline code in bus.h */ /* read multiple stream */ .bs_rm_1_s = generic_bs_rm_1, .bs_rm_2_s = generic_bs_rm_2, .bs_rm_4_s = generic_bs_rm_4, .bs_rm_8_s = BS_UNIMPLEMENTED, /* read region stream */ .bs_rr_1_s = generic_bs_rr_1, .bs_rr_2_s = generic_bs_rr_2, .bs_rr_4_s = generic_bs_rr_4, .bs_rr_8_s = BS_UNIMPLEMENTED, /* write stream (single) */ .bs_w_1_s = NULL, /* Use inline code in bus.h */ .bs_w_2_s = NULL, /* Use inline code in bus.h */ .bs_w_4_s = NULL, /* Use inline code in bus.h */ .bs_w_8_s = NULL, /* Use inline code in bus.h */ /* write multiple stream */ .bs_wm_1_s = generic_bs_wm_1, .bs_wm_2_s = generic_bs_wm_2, .bs_wm_4_s = generic_bs_wm_4, .bs_wm_8_s = BS_UNIMPLEMENTED, /* write region stream */ .bs_wr_1_s = generic_bs_wr_1, .bs_wr_2_s = generic_bs_wr_2, .bs_wr_4_s = generic_bs_wr_4, .bs_wr_8_s = BS_UNIMPLEMENTED, }; #ifdef FDT bus_space_tag_t fdtbus_bs_tag = &arm_base_bus_space; #endif -#if __ARM_ARCH < 6 -bus_space_tag_t arm_base_bs_tag = &arm_base_bus_space; -#endif Index: head/sys/arm/arm/copystr.S =================================================================== --- head/sys/arm/arm/copystr.S (revision 368140) +++ head/sys/arm/arm/copystr.S (revision 368141) @@ -1,138 +1,130 @@ /* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */ /*- * Copyright (c) 1995 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * copystr.S * * optimised and fault protected copystr functions * * Created : 16/05/95 */ #include "assym.inc" #include #include __FBSDID("$FreeBSD$"); #include .text .align 2 -#if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) -#else -.Lpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB - -#define GET_PCB(tmp) \ - ldr tmp, .Lpcb -#endif #define SAVE_REGS stmfd sp!, {r4-r6} #define RESTORE_REGS ldmfd sp!, {r4-r6} /* * r0 - user space address * r1 - kernel space address * r2 - maxlens * r3 - lencopied * * Copy string from user space to kernel space */ ENTRY(copyinstr) SAVE_REGS teq r2, #0x00000000 mov r6, #0x00000000 moveq r0, #ENAMETOOLONG beq 2f ldr r12, =VM_MAXUSER_ADDRESS GET_PCB(r4) ldr r4, [r4] #ifdef DIAGNOSTIC teq r4, #0x00000000 beq .Lcopystrpcbfault #endif adr r5, .Lcopystrfault str r5, [r4, #PCB_ONFAULT] 1: cmp r0, r12 bcs .Lcopystrfault ldrbt r5, [r0], #0x0001 add r6, r6, #0x00000001 teq r5, #0x00000000 strb r5, [r1], #0x0001 teqne r6, r2 bne 1b mov r0, #0x00000000 str r0, [r4, #PCB_ONFAULT] teq r5, #0x00000000 moveq r0, #0x00000000 movne r0, #ENAMETOOLONG 2: teq r3, #0x00000000 strne r6, [r3] RESTORE_REGS RET END(copyinstr) /* A fault occurred during the copy */ .Lcopystrfault: mov r1, #0x00000000 str r1, [r4, #PCB_ONFAULT] mov r0, #EFAULT RESTORE_REGS RET #ifdef DIAGNOSTIC .Lcopystrpcbfault: mov r2, r1 mov r1, r0 adr r0, Lcopystrpcbfaulttext bic sp, sp, #7 /* align stack to 8 bytes */ b _C_LABEL(panic) Lcopystrpcbfaulttext: .asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n" .align 2 #endif Index: head/sys/arm/arm/cpuinfo.c =================================================================== --- head/sys/arm/arm/cpuinfo.c (revision 368140) +++ head/sys/arm/arm/cpuinfo.c (revision 368141) @@ -1,539 +1,528 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#if __ARM_ARCH >= 6 void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set); int disable_bp_hardening; int spectre_v2_safe = 1; -#endif struct cpuinfo cpuinfo = { /* Use safe defaults for start */ .dcache_line_size = 32, .dcache_line_mask = 31, .icache_line_size = 32, .icache_line_mask = 31, }; static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CPU"); static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CPU quirks"); /* * Tunable CPU quirks. * Be careful: ACTLR cannot be changed if the CPU was started in secure * mode (world), and a write to ACTLR can cause an exception! * These quirks are intended for optimizing CPU performance, not for * applying errata workarounds. Nobody can expect that a CPU with unfixed * errata is stable enough to execute the kernel until the quirks are applied. */ static uint32_t cpu_quirks_actlr_mask; SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0, "Bits to be masked in ACTLR"); static uint32_t cpu_quirks_actlr_set; SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0, "Bits to be set in ACTLR"); /* Read and parse the CPU id scheme */ void cpuinfo_init(void) { -#if __ARM_ARCH >= 6 uint32_t tmp; -#endif /* * Fetch CPU quirks early. The standard fetch for tunable * sysctls is handled using SYSINIT, which is too late for the boot CPU. * Keep names in sync with the sysctls.
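 *
 * As an illustration (the value is hypothetical), such a quirk can be
 * supplied from loader.conf(5) so that it is already visible here:
 *
 *	hw.cpu.quirks.actlr_set="0x40"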
*/ TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask); TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set); cpuinfo.midr = cp15_midr_get(); /* Test old version id schemes first */ if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) { if (CPU_ID_ISOLD(cpuinfo.midr)) { /* obsolete ARMv2 or ARMv3 CPU */ cpuinfo.midr = 0; return; } if (CPU_ID_IS7(cpuinfo.midr)) { if ((cpuinfo.midr & (1 << 23)) == 0) { /* obsolete ARMv3 CPU */ cpuinfo.midr = 0; return; } /* ARMv4T CPU */ cpuinfo.architecture = 1; cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F; } else { /* ARM new id scheme */ cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; } } else { /* non-ARM -> must be new id scheme */ cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; } /* Parse rest of MIDR */ cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF; cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF; cpuinfo.patch = cpuinfo.midr & 0x0F; /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */ cpuinfo.ctr = cp15_ctr_get(); cpuinfo.tcmtr = cp15_tcmtr_get(); -#if __ARM_ARCH >= 6 cpuinfo.tlbtr = cp15_tlbtr_get(); cpuinfo.mpidr = cp15_mpidr_get(); cpuinfo.revidr = cp15_revidr_get(); -#endif /* Stop here if the CPU doesn't use the v7 CPU id scheme */ if (cpuinfo.architecture != 0xF) return; -#if __ARM_ARCH >= 6 cpuinfo.id_pfr0 = cp15_id_pfr0_get(); cpuinfo.id_pfr1 = cp15_id_pfr1_get(); cpuinfo.id_dfr0 = cp15_id_dfr0_get(); cpuinfo.id_afr0 = cp15_id_afr0_get(); cpuinfo.id_mmfr0 = cp15_id_mmfr0_get(); cpuinfo.id_mmfr1 = cp15_id_mmfr1_get(); cpuinfo.id_mmfr2 = cp15_id_mmfr2_get(); cpuinfo.id_mmfr3 = cp15_id_mmfr3_get(); cpuinfo.id_isar0 = cp15_id_isar0_get(); cpuinfo.id_isar1 = cp15_id_isar1_get(); cpuinfo.id_isar2 = cp15_id_isar2_get(); cpuinfo.id_isar3 = cp15_id_isar3_get(); cpuinfo.id_isar4 = cp15_id_isar4_get(); cpuinfo.id_isar5 = cp15_id_isar5_get(); /* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs cpuinfo.cbar = cp15_cbar_get(); */ if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) { cpuinfo.ccsidr = cp15_ccsidr_get(); cpuinfo.clidr = cp15_clidr_get(); } /* Test if revidr is implemented */ if (cpuinfo.revidr == cpuinfo.midr) cpuinfo.revidr = 0; /* parsed bits of above registers */ /* id_mmfr0 */ cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF; cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF; cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF; cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF; /* id_mmfr2 */ cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF; /* id_mmfr3 */ cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF; cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF; /* id_pfr1 */ cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF; cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF; cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF; /* mpidr */ cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1; /* L1 Cache sizes */ if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) { cpuinfo.dcache_line_size = 1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2); cpuinfo.icache_line_size = 1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2); } else { cpuinfo.dcache_line_size = 1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3); cpuinfo.icache_line_size = 1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3); } cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1; cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1; /* Fill AT_HWCAP bits.
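 *
 * Userland can later query the bits filled in below; a minimal sketch
 * (the consumer function is hypothetical, error handling elided):
 *
 *	unsigned long hwcap = 0;
 *
 *	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0 &&
 *	    (hwcap & HWCAP_IDIVA) != 0)
 *		use_hw_idiv();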
*/ elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */ elf_hwcap |= HWCAP_TLS | HWCAP_EDSP; /* Required for v6+ CPUs */ tmp = (cpuinfo.id_isar0 >> 24) & 0xF; /* Divide_instrs */ if (tmp >= 1) elf_hwcap |= HWCAP_IDIVT; if (tmp >= 2) elf_hwcap |= HWCAP_IDIVA; tmp = (cpuinfo.id_pfr0 >> 4) & 0xF; /* State1 */ if (tmp >= 1) elf_hwcap |= HWCAP_THUMB; tmp = (cpuinfo.id_pfr0 >> 12) & 0xF; /* State3 */ if (tmp >= 1) elf_hwcap |= HWCAP_THUMBEE; tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF; /* VMSA */ if (tmp >= 5) elf_hwcap |= HWCAP_LPAE; /* Fill AT_HWCAP2 bits. */ tmp = (cpuinfo.id_isar5 >> 4) & 0xF; /* AES */ if (tmp >= 1) elf_hwcap2 |= HWCAP2_AES; if (tmp >= 2) elf_hwcap2 |= HWCAP2_PMULL; tmp = (cpuinfo.id_isar5 >> 8) & 0xF; /* SHA1 */ if (tmp >= 1) elf_hwcap2 |= HWCAP2_SHA1; tmp = (cpuinfo.id_isar5 >> 12) & 0xF; /* SHA2 */ if (tmp >= 1) elf_hwcap2 |= HWCAP2_SHA2; tmp = (cpuinfo.id_isar5 >> 16) & 0xF; /* CRC32 */ if (tmp >= 1) elf_hwcap2 |= HWCAP2_CRC32; -#endif } -#if __ARM_ARCH >= 6 /* * Get bits that must be set or cleared in the ACTLR register. * Note: Bits in the ACTLR register are IMPLEMENTATION DEFINED. * It's expected that the SCU is in an operational state before this * function is called. */ static void cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set) { *actlr_mask = 0; *actlr_set = 0; if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) { switch (cpuinfo.part_number) { case CPU_ARCH_CORTEX_A75: case CPU_ARCH_CORTEX_A73: case CPU_ARCH_CORTEX_A72: case CPU_ARCH_CORTEX_A57: case CPU_ARCH_CORTEX_A53: /* Nothing to do for AArch32 */ break; case CPU_ARCH_CORTEX_A17: case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */ /* * Enable SMP mode */ *actlr_mask = (1 << 6); *actlr_set = (1 << 6); break; case CPU_ARCH_CORTEX_A15: /* * Enable snoop-delayed exclusive handling * Enable SMP mode */ *actlr_mask = (1U << 31) | (1 << 6); *actlr_set = (1U << 31) | (1 << 6); break; case CPU_ARCH_CORTEX_A9: /* * Disable exclusive L1/L2 cache control * Enable SMP mode * Enable Cache and TLB maintenance broadcast */ *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0); *actlr_set = (1 << 6) | (1 << 0); break; case CPU_ARCH_CORTEX_A8: /* * Enable L2 cache * Enable L1 data cache hardware alias checks */ *actlr_mask = (1 << 1) | (1 << 0); *actlr_set = (1 << 1); break; case CPU_ARCH_CORTEX_A7: /* * Enable SMP mode */ *actlr_mask = (1 << 6); *actlr_set = (1 << 6); break; case CPU_ARCH_CORTEX_A5: /* * Disable exclusive L1/L2 cache control * Enable SMP mode * Enable Cache and TLB maintenance broadcast */ *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0); *actlr_set = (1 << 6) | (1 << 0); break; case CPU_ARCH_ARM1176: /* * Restrict cache size to 16KB * Enable the return stack * Enable dynamic branch prediction * Enable static branch prediction */ *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); break; } return; } } /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */ void cpuinfo_reinit_mmu(uint32_t ttb) { uint32_t actlr_mask; uint32_t actlr_set; cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set); actlr_mask |= cpu_quirks_actlr_mask; actlr_set |= cpu_quirks_actlr_set; reinit_mmu(ttb, actlr_mask, actlr_set); } static bool modify_actlr(uint32_t clear, uint32_t set) { uint32_t reg, newreg; reg = cp15_actlr_get(); newreg = reg; newreg &= ~clear; newreg |= set; if (reg == newreg) return (true); cp15_actlr_set(newreg); reg = cp15_actlr_get(); if (reg == newreg) return (true); return (false); } /* Apply/restore BP hardening on current core.
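 *
 * For cores whose mitigation requires an ACTLR change, 'actrl' is true
 * and 'set_mask' holds the bit(s) to set, e.g. (1 << 6) for the
 * Cortex-A8 IBE bit in the caller below.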
*/ static int apply_bp_hardening(bool enable, int kind, bool actrl, uint32_t set_mask) { if (enable) { if (actrl && !modify_actlr(0, set_mask)) return (-1); PCPU_SET(bp_harden_kind, kind); } else { PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE); if (actrl) modify_actlr(~0, PCPU_GET(original_actlr)); spectre_v2_safe = 0; } return (0); } static void handle_bp_hardening(bool enable) { int kind; char *kind_str; kind = PCPU_BP_HARDEN_KIND_NONE; /* * Note: Access to ACTLR is locked to the secure world on most boards. * This means that full BP hardening depends on updated u-boot/firmware * or is impossible at all (if the secure monitor is in on-chip ROM). */ if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) { switch (cpuinfo.part_number) { case CPU_ARCH_CORTEX_A8: /* * For Cortex-A8, the IBE bit must be set, otherwise * BPIALL is effectively a NOP. * Unfortunately, Cortex-A8 is also affected by * ARM erratum 687067, which causes a non-working * BPIALL if the IBE bit is set and 'Instruction L1 System * Array Debug Register 0' is not 0. * This register is not reset on power-up and is * accessible only from the secure world, so we can do * nothing (nor even detect the problem) to fix this issue. * I'm afraid that the on-chip ROM based secure monitor on * AM335x (BeagleBone) doesn't reset this debug * register. */ kind = PCPU_BP_HARDEN_KIND_BPIALL; if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0) goto actlr_err; break; case CPU_ARCH_CORTEX_A9: case CPU_ARCH_CORTEX_A12: case CPU_ARCH_CORTEX_A17: case CPU_ARCH_CORTEX_A57: case CPU_ARCH_CORTEX_A72: case CPU_ARCH_CORTEX_A73: case CPU_ARCH_CORTEX_A75: kind = PCPU_BP_HARDEN_KIND_BPIALL; if (apply_bp_hardening(enable, kind, false, 0) != 0) goto actlr_err; break; case CPU_ARCH_CORTEX_A15: /* * For Cortex-A15, set the 'Enable invalidates of BTB' bit. * Despite this, the BPIALL is still effectively a NOP, * but with this bit set, the ICIALLU also flushes the * branch predictor as a side effect. */ kind = PCPU_BP_HARDEN_KIND_ICIALLU; if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0) goto actlr_err; break; default: break; } } else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) { printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative " "branch attacks. !!!\n" "Qualcomm Krait cores are known (or believed) to be " "vulnerable to \n" "speculative branch attacks, no mitigation exists yet.\n", PCPU_GET(cpuid)); goto unknown_mitigation; } else { goto unknown_mitigation; } if (bootverbose) { switch (kind) { case PCPU_BP_HARDEN_KIND_NONE: kind_str = "not necessary"; break; case PCPU_BP_HARDEN_KIND_BPIALL: kind_str = "BPIALL"; break; case PCPU_BP_HARDEN_KIND_ICIALLU: kind_str = "ICIALLU"; break; default: panic("Unknown BP hardening kind (%d).", kind); } printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid), kind_str); } return; unknown_mitigation: PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE); spectre_v2_safe = 0; return; actlr_err: PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE); spectre_v2_safe = 0; printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch " "attacks. !!!\n" "We cannot enable the required bit(s) in the ACTLR register\n" "because it's locked by the secure monitor and/or firmware.\n", PCPU_GET(cpuid)); } void cpuinfo_init_bp_hardening(void) { /* * Store the original unmodified ACTLR, so we can restore it when * BP hardening is disabled by sysctl.
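 *
 * The mitigation can then be toggled and inspected at runtime, e.g.
 * (illustrative root-shell commands):
 *
 *	sysctl machdep.disable_bp_hardening=1
 *	sysctl machdep.spectre_v2_safe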
*/ PCPU_SET(original_actlr, cp15_actlr_get()); handle_bp_hardening(true); } static void bp_hardening_action(void *arg) { handle_bp_hardening(disable_bp_hardening == 0); } static int sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS) { int rv; rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); if (!rv && req->newptr) { spectre_v2_safe = 1; dmb(); #ifdef SMP smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier, bp_hardening_action, NULL, NULL); #else bp_hardening_action(NULL); #endif } return (rv); } SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I", "Disable BP hardening mitigation."); SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD, &spectre_v2_safe, 0, "System is safe from Spectre Version 2 attacks"); - -#endif /* __ARM_ARCH >= 6 */ Index: head/sys/arm/arm/db_interface.c =================================================================== --- head/sys/arm/arm/db_interface.c (revision 368140) +++ head/sys/arm/arm/db_interface.c (revision 368141) @@ -1,328 +1,327 @@ /* $NetBSD: db_interface.c,v 1.33 2003/08/25 04:51:10 mrg Exp $ */ /*- * Copyright (c) 1996 Scott K. Stevens * * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU) */ /* * Interface to new debugger.
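 *
 * The db_regs[] table below exposes the trapframe registers as ddb(4)
 * variables, so they can be inspected by name from the debugger prompt,
 * e.g. (illustrative):
 *
 *	db> print $pc
 *	db> print $spsr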
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include /* just for boothowto */ #include #ifdef KDB #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int nil = 0; int db_access_und_sp (struct db_variable *, db_expr_t *, int); int db_access_abt_sp (struct db_variable *, db_expr_t *, int); int db_access_irq_sp (struct db_variable *, db_expr_t *, int); static db_varfcn_t db_frame; #define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x) struct db_variable db_regs[] = { { "spsr", DB_OFFSET(tf_spsr), db_frame }, { "r0", DB_OFFSET(tf_r0), db_frame }, { "r1", DB_OFFSET(tf_r1), db_frame }, { "r2", DB_OFFSET(tf_r2), db_frame }, { "r3", DB_OFFSET(tf_r3), db_frame }, { "r4", DB_OFFSET(tf_r4), db_frame }, { "r5", DB_OFFSET(tf_r5), db_frame }, { "r6", DB_OFFSET(tf_r6), db_frame }, { "r7", DB_OFFSET(tf_r7), db_frame }, { "r8", DB_OFFSET(tf_r8), db_frame }, { "r9", DB_OFFSET(tf_r9), db_frame }, { "r10", DB_OFFSET(tf_r10), db_frame }, { "r11", DB_OFFSET(tf_r11), db_frame }, { "r12", DB_OFFSET(tf_r12), db_frame }, { "usr_sp", DB_OFFSET(tf_usr_sp), db_frame }, { "usr_lr", DB_OFFSET(tf_usr_lr), db_frame }, { "svc_sp", DB_OFFSET(tf_svc_sp), db_frame }, { "svc_lr", DB_OFFSET(tf_svc_lr), db_frame }, { "pc", DB_OFFSET(tf_pc), db_frame }, { "und_sp", &nil, db_access_und_sp, }, { "abt_sp", &nil, db_access_abt_sp, }, { "irq_sp", &nil, db_access_irq_sp, }, }; struct db_variable *db_eregs = db_regs + nitems(db_regs); int db_access_und_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_UND32_MODE); return (1); } return (0); } int db_access_abt_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_ABT32_MODE); return (1); } return (0); } int db_access_irq_sp(struct db_variable *vp, db_expr_t *valp, int rw) { if (rw == DB_VAR_GET) { *valp = get_stackptr(PSR_IRQ32_MODE); return (1); } return (0); } int db_frame(struct db_variable *vp, db_expr_t *valp, int rw) { int *reg; if (kdb_frame == NULL) return (0); reg = (int *)((uintptr_t)kdb_frame + (db_expr_t)vp->valuep); if (rw == DB_VAR_GET) *valp = *reg; else *reg = *valp; return (1); } void db_show_mdpcpu(struct pcpu *pc) { -#if __ARM_ARCH >= 6 db_printf("curpmap = %p\n", pc->pc_curpmap); -#endif } + int db_validate_address(vm_offset_t addr) { struct proc *p = curproc; struct pmap *pmap; if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap || #ifndef ARM32_NEW_VM_LAYOUT addr >= VM_MAXUSER_ADDRESS #else addr >= VM_MIN_KERNEL_ADDRESS #endif ) pmap = kernel_pmap; else pmap = p->p_vmspace->vm_map.pmap; return (pmap_extract(pmap, addr) == FALSE); } /* * Read bytes from kernel address space for debugger. */ int db_read_bytes(addr, size, data) vm_offset_t addr; size_t size; char *data; { char *src = (char *)addr; if (db_validate_address((u_int)src)) { db_printf("address %p is invalid\n", src); return (-1); } if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) { *((int*)data) = *((int*)src); return (0); } if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) { *((short*)data) = *((short*)src); return (0); } while (size-- > 0) { if (db_validate_address((u_int)src)) { db_printf("address %p is invalid\n", src); return (-1); } *data++ = *src++; } return (0); } /* * Write bytes to kernel address space for debugger. 
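 *
 * Note: after patching memory, the routine below syncs the I-cache and
 * flushes the TLB, so, e.g., a breakpoint instruction written by the
 * debugger takes effect immediately.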
*/ int db_write_bytes(vm_offset_t addr, size_t size, char *data) { char *dst; size_t loop; dst = (char *)addr; if (db_validate_address((u_int)dst)) { db_printf("address %p is invalid\n", dst); return (0); } if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) *((int*)dst) = *((int*)data); else if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) *((short*)dst) = *((short*)data); else { loop = size; while (loop-- > 0) { if (db_validate_address((u_int)dst)) { db_printf("address %p is invalid\n", dst); return (-1); } *dst++ = *data++; } } /* make sure the caches and memory are in sync */ icache_sync(addr, size); /* In case the current page tables have been modified ... */ tlb_flush_all(); return (0); } static u_int db_fetch_reg(int reg) { switch (reg) { case 0: return (kdb_frame->tf_r0); case 1: return (kdb_frame->tf_r1); case 2: return (kdb_frame->tf_r2); case 3: return (kdb_frame->tf_r3); case 4: return (kdb_frame->tf_r4); case 5: return (kdb_frame->tf_r5); case 6: return (kdb_frame->tf_r6); case 7: return (kdb_frame->tf_r7); case 8: return (kdb_frame->tf_r8); case 9: return (kdb_frame->tf_r9); case 10: return (kdb_frame->tf_r10); case 11: return (kdb_frame->tf_r11); case 12: return (kdb_frame->tf_r12); case 13: return (kdb_frame->tf_svc_sp); case 14: return (kdb_frame->tf_svc_lr); case 15: return (kdb_frame->tf_pc); default: panic("db_fetch_reg: botch"); } } static u_int db_branch_taken_read_int(void *cookie __unused, vm_offset_t offset, u_int *val) { u_int ret; db_read_bytes(offset, 4, (char *)&ret); *val = ret; return (0); } static u_int db_branch_taken_fetch_reg(void *cookie __unused, int reg) { return (db_fetch_reg(reg)); } u_int branch_taken(u_int insn, db_addr_t pc) { register_t new_pc; int ret; ret = arm_predict_branch(NULL, insn, (register_t)pc, &new_pc, db_branch_taken_fetch_reg, db_branch_taken_read_int); if (ret != 0) kdb_reenter(); return (new_pc); } Index: head/sys/arm/arm/debug_monitor.c =================================================================== --- head/sys/arm/arm/debug_monitor.c (revision 368140) +++ head/sys/arm/arm/debug_monitor.c (revision 368141) @@ -1,1040 +1,1039 @@ /* * Copyright (c) 2015 Juniper Networks Inc. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum dbg_t { DBG_TYPE_BREAKPOINT = 0, DBG_TYPE_WATCHPOINT = 1, }; struct dbg_wb_conf { enum dbg_t type; enum dbg_access_t access; db_addr_t address; db_expr_t size; u_int slot; }; static int dbg_reset_state(void); static int dbg_setup_breakpoint(db_expr_t, db_expr_t, u_int); static int dbg_remove_breakpoint(u_int); static u_int dbg_find_slot(enum dbg_t, db_expr_t); static boolean_t dbg_check_slot_free(enum dbg_t, u_int); static int dbg_remove_xpoint(struct dbg_wb_conf *); static int dbg_setup_xpoint(struct dbg_wb_conf *); static int dbg_capable_var; /* Indicates that machine is capable of using HW watchpoints/breakpoints */ static uint32_t dbg_model; /* Debug Arch. Model */ static boolean_t dbg_ossr; /* OS Save and Restore implemented */ static uint32_t dbg_watchpoint_num; static uint32_t dbg_breakpoint_num; /* ID_DFR0 - Debug Feature Register 0 */ #define ID_DFR0_CP_DEBUG_M_SHIFT 0 #define ID_DFR0_CP_DEBUG_M_MASK (0xF << ID_DFR0_CP_DEBUG_M_SHIFT) #define ID_DFR0_CP_DEBUG_M_NS (0x0) /* Not supported */ #define ID_DFR0_CP_DEBUG_M_V6 (0x2) /* v6 Debug arch. CP14 access */ #define ID_DFR0_CP_DEBUG_M_V6_1 (0x3) /* v6.1 Debug arch. CP14 access */ #define ID_DFR0_CP_DEBUG_M_V7 (0x4) /* v7 Debug arch. CP14 access */ #define ID_DFR0_CP_DEBUG_M_V7_1 (0x5) /* v7.1 Debug arch. CP14 access */ /* DBGDIDR - Debug ID Register */ #define DBGDIDR_WRPS_SHIFT 28 #define DBGDIDR_WRPS_MASK (0xF << DBGDIDR_WRPS_SHIFT) #define DBGDIDR_WRPS_NUM(reg) \ ((((reg) & DBGDIDR_WRPS_MASK) >> DBGDIDR_WRPS_SHIFT) + 1) #define DBGDIDR_BRPS_SHIFT 24 #define DBGDIDR_BRPS_MASK (0xF << DBGDIDR_BRPS_SHIFT) #define DBGDIDR_BRPS_NUM(reg) \ ((((reg) & DBGDIDR_BRPS_MASK) >> DBGDIDR_BRPS_SHIFT) + 1) /* DBGPRSR - Device Powerdown and Reset Status Register */ #define DBGPRSR_PU (1 << 0) /* Powerup status */ /* DBGOSLSR - OS Lock Status Register */ #define DBGOSLSR_OSLM0 (1 << 0) /* DBGOSDLR - OS Double Lock Register */ #define DBGPRSR_DLK (1 << 0) /* OS Double Lock set */ /* DBGDSCR - Debug Status and Control Register */ #define DBGSCR_MDBG_EN (1 << 15) /* Monitor debug-mode enable */ /* DBGWVR - Watchpoint Value Register */ #define DBGWVR_ADDR_MASK (~0x3U) /* Watchpoints/breakpoints control register bitfields */ #define DBG_WB_CTRL_LEN_1 (0x1 << 5) #define DBG_WB_CTRL_LEN_2 (0x3 << 5) #define DBG_WB_CTRL_LEN_4 (0xf << 5) #define DBG_WB_CTRL_LEN_8 (0xff << 5) #define DBG_WB_CTRL_LEN_MASK(x) ((x) & (0xff << 5)) #define DBG_WB_CTRL_EXEC (0x0 << 3) #define DBG_WB_CTRL_LOAD (0x1 << 3) #define DBG_WB_CTRL_STORE (0x2 << 3) #define DBG_WB_CTRL_ACCESS_MASK(x) ((x) & (0x3 << 3)) /* Common for breakpoint and watchpoint */ #define DBG_WB_CTRL_PL1 (0x1 << 1) #define DBG_WB_CTRL_PL0 (0x2 << 1) #define DBG_WB_CTRL_PLX_MASK(x) ((x) & (0x3 << 1)) #define DBG_WB_CTRL_E (0x1 << 0) /* * Watchpoint/breakpoint helpers */ #define DBG_BKPT_BT_SLOT 0 /* Slot for branch taken */ #define DBG_BKPT_BNT_SLOT 1 /* Slot for branch not taken */ #define OP2_SHIFT 4 /* Opc2 numbers for coprocessor instructions */ #define DBG_WB_BVR 4 #define DBG_WB_BCR 5 #define DBG_WB_WVR 6 #define DBG_WB_WCR 7 #define DBG_REG_BASE_BVR (DBG_WB_BVR << OP2_SHIFT) #define DBG_REG_BASE_BCR (DBG_WB_BCR << OP2_SHIFT) #define DBG_REG_BASE_WVR (DBG_WB_WVR << OP2_SHIFT) #define DBG_REG_BASE_WCR (DBG_WB_WCR << OP2_SHIFT) #define DBG_WB_READ(cn, cm, op2, val) do { \ __asm __volatile("mrc p14, 0, %0, 
" #cn "," #cm "," #op2 : "=r" (val)); \ } while (0) #define DBG_WB_WRITE(cn, cm, op2, val) do { \ __asm __volatile("mcr p14, 0, %0, " #cn "," #cm "," #op2 :: "r" (val)); \ } while (0) #define READ_WB_REG_CASE(op2, m, val) \ case (((op2) << OP2_SHIFT) + m): \ DBG_WB_READ(c0, c ## m, op2, val); \ break #define WRITE_WB_REG_CASE(op2, m, val) \ case (((op2) << OP2_SHIFT) + m): \ DBG_WB_WRITE(c0, c ## m, op2, val); \ break #define SWITCH_CASES_READ_WB_REG(op2, val) \ READ_WB_REG_CASE(op2, 0, val); \ READ_WB_REG_CASE(op2, 1, val); \ READ_WB_REG_CASE(op2, 2, val); \ READ_WB_REG_CASE(op2, 3, val); \ READ_WB_REG_CASE(op2, 4, val); \ READ_WB_REG_CASE(op2, 5, val); \ READ_WB_REG_CASE(op2, 6, val); \ READ_WB_REG_CASE(op2, 7, val); \ READ_WB_REG_CASE(op2, 8, val); \ READ_WB_REG_CASE(op2, 9, val); \ READ_WB_REG_CASE(op2, 10, val); \ READ_WB_REG_CASE(op2, 11, val); \ READ_WB_REG_CASE(op2, 12, val); \ READ_WB_REG_CASE(op2, 13, val); \ READ_WB_REG_CASE(op2, 14, val); \ READ_WB_REG_CASE(op2, 15, val) #define SWITCH_CASES_WRITE_WB_REG(op2, val) \ WRITE_WB_REG_CASE(op2, 0, val); \ WRITE_WB_REG_CASE(op2, 1, val); \ WRITE_WB_REG_CASE(op2, 2, val); \ WRITE_WB_REG_CASE(op2, 3, val); \ WRITE_WB_REG_CASE(op2, 4, val); \ WRITE_WB_REG_CASE(op2, 5, val); \ WRITE_WB_REG_CASE(op2, 6, val); \ WRITE_WB_REG_CASE(op2, 7, val); \ WRITE_WB_REG_CASE(op2, 8, val); \ WRITE_WB_REG_CASE(op2, 9, val); \ WRITE_WB_REG_CASE(op2, 10, val); \ WRITE_WB_REG_CASE(op2, 11, val); \ WRITE_WB_REG_CASE(op2, 12, val); \ WRITE_WB_REG_CASE(op2, 13, val); \ WRITE_WB_REG_CASE(op2, 14, val); \ WRITE_WB_REG_CASE(op2, 15, val) static uint32_t dbg_wb_read_reg(int reg, int n) { uint32_t val; val = 0; switch (reg + n) { SWITCH_CASES_READ_WB_REG(DBG_WB_WVR, val); SWITCH_CASES_READ_WB_REG(DBG_WB_WCR, val); SWITCH_CASES_READ_WB_REG(DBG_WB_BVR, val); SWITCH_CASES_READ_WB_REG(DBG_WB_BCR, val); default: db_printf( "trying to read from CP14 reg. using wrong opc2 %d\n", reg >> OP2_SHIFT); } return (val); } static void dbg_wb_write_reg(int reg, int n, uint32_t val) { switch (reg + n) { SWITCH_CASES_WRITE_WB_REG(DBG_WB_WVR, val); SWITCH_CASES_WRITE_WB_REG(DBG_WB_WCR, val); SWITCH_CASES_WRITE_WB_REG(DBG_WB_BVR, val); SWITCH_CASES_WRITE_WB_REG(DBG_WB_BCR, val); default: db_printf( "trying to write to CP14 reg. using wrong opc2 %d\n", reg >> OP2_SHIFT); } isb(); } static __inline boolean_t dbg_capable(void) { return (atomic_cmpset_int(&dbg_capable_var, 0, 0) == 0); } boolean_t kdb_cpu_pc_is_singlestep(db_addr_t pc) { /* * XXX: If the platform fails to enable its debug arch. * there will be no stepping capabilities - * (SOFTWARE_SSTEP is not defined for __ARM_ARCH >= 6). */ if (!dbg_capable()) return (FALSE); if (dbg_find_slot(DBG_TYPE_BREAKPOINT, pc) != ~0U) return (TRUE); return (FALSE); } void kdb_cpu_set_singlestep(void) { db_expr_t inst; db_addr_t pc, brpc; uint32_t wcr; u_int i; if (!dbg_capable()) return; /* * Disable watchpoints, e.g. stepping over watched instruction will * trigger break exception instead of single-step exception and locks * CPU on that instruction for ever. 
*/ for (i = 0; i < dbg_watchpoint_num; i++) { wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); if ((wcr & DBG_WB_CTRL_E) != 0) { dbg_wb_write_reg(DBG_REG_BASE_WCR, i, (wcr & ~DBG_WB_CTRL_E)); } } pc = PC_REGS(); inst = db_get_value(pc, sizeof(pc), FALSE); if (inst_branch(inst) || inst_call(inst) || inst_return(inst)) { brpc = branch_taken(inst, pc); dbg_setup_breakpoint(brpc, INSN_SIZE, DBG_BKPT_BT_SLOT); } pc = next_instr_address(pc, 0); dbg_setup_breakpoint(pc, INSN_SIZE, DBG_BKPT_BNT_SLOT); } void kdb_cpu_clear_singlestep(void) { uint32_t wvr, wcr; u_int i; if (!dbg_capable()) return; dbg_remove_breakpoint(DBG_BKPT_BT_SLOT); dbg_remove_breakpoint(DBG_BKPT_BNT_SLOT); /* Restore all watchpoints */ for (i = 0; i < dbg_watchpoint_num; i++) { wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); wvr = dbg_wb_read_reg(DBG_REG_BASE_WVR, i); /* Watchpoint considered not empty if address value is not 0 */ if ((wvr & DBGWVR_ADDR_MASK) != 0) { dbg_wb_write_reg(DBG_REG_BASE_WCR, i, (wcr | DBG_WB_CTRL_E)); } } } int dbg_setup_watchpoint(db_expr_t addr, db_expr_t size, enum dbg_access_t access) { struct dbg_wb_conf conf; if (access == HW_BREAKPOINT_X) { db_printf("Invalid access type for watchpoint: %d\n", access); return (EINVAL); } conf.address = addr; conf.size = size; conf.access = access; conf.type = DBG_TYPE_WATCHPOINT; return (dbg_setup_xpoint(&conf)); } int dbg_remove_watchpoint(db_expr_t addr, db_expr_t size __unused) { struct dbg_wb_conf conf; conf.address = addr; conf.type = DBG_TYPE_WATCHPOINT; return (dbg_remove_xpoint(&conf)); } static int dbg_setup_breakpoint(db_expr_t addr, db_expr_t size, u_int slot) { struct dbg_wb_conf conf; conf.address = addr; conf.size = size; conf.access = HW_BREAKPOINT_X; conf.type = DBG_TYPE_BREAKPOINT; conf.slot = slot; return (dbg_setup_xpoint(&conf)); } static int dbg_remove_breakpoint(u_int slot) { struct dbg_wb_conf conf; /* Slot already cleared. Don't recurse */ if (dbg_check_slot_free(DBG_TYPE_BREAKPOINT, slot)) return (0); conf.slot = slot; conf.type = DBG_TYPE_BREAKPOINT; return (dbg_remove_xpoint(&conf)); } static const char * dbg_watchtype_str(uint32_t type) { switch (type) { case DBG_WB_CTRL_EXEC: return ("execute"); case DBG_WB_CTRL_STORE: return ("write"); case DBG_WB_CTRL_LOAD: return ("read"); case DBG_WB_CTRL_LOAD | DBG_WB_CTRL_STORE: return ("read/write"); default: return ("invalid"); } } static int dbg_watchtype_len(uint32_t len) { switch (len) { case DBG_WB_CTRL_LEN_1: return (1); case DBG_WB_CTRL_LEN_2: return (2); case DBG_WB_CTRL_LEN_4: return (4); case DBG_WB_CTRL_LEN_8: return (8); default: return (0); } } void dbg_show_watchpoint(void) { uint32_t wcr, len, type; uint32_t addr; boolean_t is_enabled; int i; if (!dbg_capable()) { db_printf("Architecture does not support HW " "breakpoints/watchpoints\n"); return; } db_printf("\nhardware watchpoints:\n"); db_printf(" watch status type len address symbol\n"); db_printf(" ----- -------- ---------- --- ---------- ------------------\n"); for (i = 0; i < dbg_watchpoint_num; i++) { wcr = dbg_wb_read_reg(DBG_REG_BASE_WCR, i); if ((wcr & DBG_WB_CTRL_E) != 0) is_enabled = TRUE; else is_enabled = FALSE; type = DBG_WB_CTRL_ACCESS_MASK(wcr); len = DBG_WB_CTRL_LEN_MASK(wcr); addr = dbg_wb_read_reg(DBG_REG_BASE_WVR, i) & DBGWVR_ADDR_MASK; db_printf(" %-5d %-8s %10s %3d 0x%08x ", i, is_enabled ? "enabled" : "disabled", is_enabled ? dbg_watchtype_str(type) : "", is_enabled ? 
dbg_watchtype_len(len) : 0, addr); db_printsym((db_addr_t)addr, DB_STGY_ANY); db_printf("\n"); } } static boolean_t dbg_check_slot_free(enum dbg_t type, u_int slot) { uint32_t cr, vr; uint32_t max; switch(type) { case DBG_TYPE_BREAKPOINT: max = dbg_breakpoint_num; cr = DBG_REG_BASE_BCR; vr = DBG_REG_BASE_BVR; break; case DBG_TYPE_WATCHPOINT: max = dbg_watchpoint_num; cr = DBG_REG_BASE_WCR; vr = DBG_REG_BASE_WVR; break; default: db_printf("%s: Unsupported event type %d\n", __func__, type); return (FALSE); } if (slot >= max) { db_printf("%s: Invalid slot number %d, max %d\n", __func__, slot, max - 1); return (FALSE); } if ((dbg_wb_read_reg(cr, slot) & DBG_WB_CTRL_E) == 0 && (dbg_wb_read_reg(vr, slot) & DBGWVR_ADDR_MASK) == 0) return (TRUE); return (FALSE); } static u_int dbg_find_free_slot(enum dbg_t type) { u_int max, i; switch(type) { case DBG_TYPE_BREAKPOINT: max = dbg_breakpoint_num; break; case DBG_TYPE_WATCHPOINT: max = dbg_watchpoint_num; break; default: db_printf("Unsupported debug type\n"); return (~0U); } for (i = 0; i < max; i++) { if (dbg_check_slot_free(type, i)) return (i); } return (~0U); } static u_int dbg_find_slot(enum dbg_t type, db_expr_t addr) { uint32_t reg_addr, reg_ctrl; u_int max, i; switch(type) { case DBG_TYPE_BREAKPOINT: max = dbg_breakpoint_num; reg_addr = DBG_REG_BASE_BVR; reg_ctrl = DBG_REG_BASE_BCR; break; case DBG_TYPE_WATCHPOINT: max = dbg_watchpoint_num; reg_addr = DBG_REG_BASE_WVR; reg_ctrl = DBG_REG_BASE_WCR; break; default: db_printf("Unsupported debug type\n"); return (~0U); } for (i = 0; i < max; i++) { if ((dbg_wb_read_reg(reg_addr, i) == addr) && ((dbg_wb_read_reg(reg_ctrl, i) & DBG_WB_CTRL_E) != 0)) return (i); } return (~0U); } static __inline boolean_t dbg_monitor_is_enabled(void) { return ((cp14_dbgdscrint_get() & DBGSCR_MDBG_EN) != 0); } static int dbg_enable_monitor(void) { uint32_t dbg_dscr; /* Already enabled? Just return */ if (dbg_monitor_is_enabled()) return (0); dbg_dscr = cp14_dbgdscrint_get(); switch (dbg_model) { case ID_DFR0_CP_DEBUG_M_V6: case ID_DFR0_CP_DEBUG_M_V6_1: /* fall through */ cp14_dbgdscr_v6_set(dbg_dscr | DBGSCR_MDBG_EN); break; case ID_DFR0_CP_DEBUG_M_V7: /* fall through */ case ID_DFR0_CP_DEBUG_M_V7_1: cp14_dbgdscr_v7_set(dbg_dscr | DBGSCR_MDBG_EN); break; default: break; } isb(); /* Verify that Monitor mode is set */ if (dbg_monitor_is_enabled()) return (0); return (ENXIO); } static int dbg_setup_xpoint(struct dbg_wb_conf *conf) { struct pcpu *pcpu; struct dbreg *d; const char *typestr; uint32_t cr_size, cr_priv, cr_access; uint32_t reg_ctrl, reg_addr, ctrl, addr; boolean_t is_bkpt; u_int cpu; u_int i; if (!dbg_capable()) return (ENXIO); is_bkpt = (conf->type == DBG_TYPE_BREAKPOINT); typestr = is_bkpt ? "breakpoint" : "watchpoint"; if (is_bkpt) { if (dbg_breakpoint_num == 0) { db_printf("Breakpoints not supported on this architecture\n"); return (ENXIO); } i = conf->slot; if (!dbg_check_slot_free(DBG_TYPE_BREAKPOINT, i)) { /* * This should never happen. If it does, it means that * there is an erroneous scenario somewhere. Still, it can * be done, but let's inform the user. */ db_printf("ERROR: Breakpoint already set.
Replacing...\n"); } } else { i = dbg_find_free_slot(DBG_TYPE_WATCHPOINT); if (i == ~0U) { db_printf("Can not find slot for %s, max %d slots supported\n", typestr, dbg_watchpoint_num); return (ENXIO); } } /* Kernel access only */ cr_priv = DBG_WB_CTRL_PL1; switch(conf->size) { case 1: cr_size = DBG_WB_CTRL_LEN_1; break; case 2: cr_size = DBG_WB_CTRL_LEN_2; break; case 4: cr_size = DBG_WB_CTRL_LEN_4; break; case 8: cr_size = DBG_WB_CTRL_LEN_8; break; default: db_printf("Unsupported address size for %s\n", typestr); return (EINVAL); } if (is_bkpt) { cr_access = DBG_WB_CTRL_EXEC; reg_ctrl = DBG_REG_BASE_BCR; reg_addr = DBG_REG_BASE_BVR; /* Always unlinked BKPT */ ctrl = (cr_size | cr_access | cr_priv | DBG_WB_CTRL_E); } else { switch(conf->access) { case HW_WATCHPOINT_R: cr_access = DBG_WB_CTRL_LOAD; break; case HW_WATCHPOINT_W: cr_access = DBG_WB_CTRL_STORE; break; case HW_WATCHPOINT_RW: cr_access = DBG_WB_CTRL_LOAD | DBG_WB_CTRL_STORE; break; default: db_printf("Unsupported exception level for %s\n", typestr); return (EINVAL); } reg_ctrl = DBG_REG_BASE_WCR; reg_addr = DBG_REG_BASE_WVR; ctrl = (cr_size | cr_access | cr_priv | DBG_WB_CTRL_E); } addr = conf->address; dbg_wb_write_reg(reg_addr, i, addr); dbg_wb_write_reg(reg_ctrl, i, ctrl); /* * Save watchpoint settings for all CPUs. * We don't need to do the same with breakpoints since HW breakpoints * are only used to perform single stepping. */ if (!is_bkpt) { CPU_FOREACH(cpu) { pcpu = pcpu_find(cpu); /* Fill out the settings for watchpoint */ d = (struct dbreg *)pcpu->pc_dbreg; d->dbg_wvr[i] = addr; d->dbg_wcr[i] = ctrl; /* Skip update command for the current CPU */ if (cpu != PCPU_GET(cpuid)) pcpu->pc_dbreg_cmd = PC_DBREG_CMD_LOAD; } } /* Ensure all data is written before waking other CPUs */ atomic_thread_fence_rel(); return (0); } static int dbg_remove_xpoint(struct dbg_wb_conf *conf) { struct pcpu *pcpu; struct dbreg *d; uint32_t reg_ctrl, reg_addr, addr; boolean_t is_bkpt; u_int cpu; u_int i; if (!dbg_capable()) return (ENXIO); is_bkpt = (conf->type == DBG_TYPE_BREAKPOINT); addr = conf->address; if (is_bkpt) { i = conf->slot; reg_ctrl = DBG_REG_BASE_BCR; reg_addr = DBG_REG_BASE_BVR; } else { i = dbg_find_slot(DBG_TYPE_WATCHPOINT, addr); if (i == ~0U) { db_printf("Can not find watchpoint for address 0%x\n", addr); return (EINVAL); } reg_ctrl = DBG_REG_BASE_WCR; reg_addr = DBG_REG_BASE_WVR; } dbg_wb_write_reg(reg_ctrl, i, 0); dbg_wb_write_reg(reg_addr, i, 0); /* * Save watchpoint settings for all CPUs. * We don't need to do the same with breakpoints since HW breakpoints * are only used to perform single stepping. 
if (!is_bkpt) { CPU_FOREACH(cpu) { pcpu = pcpu_find(cpu); /* Fill out the settings for watchpoint */ d = (struct dbreg *)pcpu->pc_dbreg; d->dbg_wvr[i] = 0; d->dbg_wcr[i] = 0; /* Skip update command for the current CPU */ if (cpu != PCPU_GET(cpuid)) pcpu->pc_dbreg_cmd = PC_DBREG_CMD_LOAD; } /* Ensure all data is written before waking other CPUs */ atomic_thread_fence_rel(); } return (0); } static __inline uint32_t dbg_get_debug_model(void) { uint32_t dbg_m; dbg_m = ((cpuinfo.id_dfr0 & ID_DFR0_CP_DEBUG_M_MASK) >> ID_DFR0_CP_DEBUG_M_SHIFT); return (dbg_m); } static __inline boolean_t dbg_get_ossr(void) { switch (dbg_model) { case ID_DFR0_CP_DEBUG_M_V7: if ((cp14_dbgoslsr_get() & DBGOSLSR_OSLM0) != 0) return (TRUE); return (FALSE); case ID_DFR0_CP_DEBUG_M_V7_1: return (TRUE); default: return (FALSE); } } static __inline boolean_t dbg_arch_supported(void) { uint32_t dbg_didr; switch (dbg_model) { case ID_DFR0_CP_DEBUG_M_V6: case ID_DFR0_CP_DEBUG_M_V6_1: dbg_didr = cp14_dbgdidr_get(); /* * read-all-zeroes is used by QEMU * to indicate that ARMv6 debug support * is not implemented. Real hardware has at * least version bits set */ if (dbg_didr == 0) return (FALSE); return (TRUE); case ID_DFR0_CP_DEBUG_M_V7: case ID_DFR0_CP_DEBUG_M_V7_1: /* fall through */ return (TRUE); default: /* We only support valid v6.x/v7.x modes through CP14 */ return (FALSE); } } static __inline uint32_t dbg_get_wrp_num(void) { uint32_t dbg_didr; dbg_didr = cp14_dbgdidr_get(); return (DBGDIDR_WRPS_NUM(dbg_didr)); } static __inline uint32_t dgb_get_brp_num(void) { uint32_t dbg_didr; dbg_didr = cp14_dbgdidr_get(); return (DBGDIDR_BRPS_NUM(dbg_didr)); } static int dbg_reset_state(void) { u_int cpuid; size_t i; int err; cpuid = PCPU_GET(cpuid); err = 0; switch (dbg_model) { case ID_DFR0_CP_DEBUG_M_V6: case ID_DFR0_CP_DEBUG_M_V6_1: /* fall through */ /* * Arch needs monitor mode selected and enabled * to be able to access breakpoint/watchpoint registers. */ err = dbg_enable_monitor(); if (err != 0) return (err); goto vectr_clr; case ID_DFR0_CP_DEBUG_M_V7: /* Is core power domain powered up? */ if ((cp14_dbgprsr_get() & DBGPRSR_PU) == 0) err = ENXIO; if (err != 0) break; if (dbg_ossr) goto vectr_clr; break; case ID_DFR0_CP_DEBUG_M_V7_1: /* Is double lock set? */ if ((cp14_dbgosdlr_get() & DBGPRSR_DLK) != 0) err = ENXIO; break; default: break; } if (err != 0) { db_printf("Debug facility locked (CPU%d)\n", cpuid); return (err); } /* * DBGOSLAR is always implemented for the v7.1 Debug Arch.; however, it * is optional for v7 (depends on OS save and restore support). */ if (((dbg_model & ID_DFR0_CP_DEBUG_M_V7_1) != 0) || dbg_ossr) { /* * Clear OS lock. * Writing any value other than 0xC5ACCESS will unlock. */ cp14_dbgoslar_set(0); isb(); } vectr_clr: /* * After reset we must ensure that DBGVCR has a defined value. * Disable all vector catch events. Safe to use - required in all * implementations. */ cp14_dbgvcr_set(0); isb(); /* * We have a limited number of {watch,break}points, each consists of * two registers: * - the wcr/bcr register configures the corresponding {watch,break}point * behaviour * - the wvr/bvr register keeps the address we are hunting for * * Reset all breakpoints and watchpoints. */
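For orientation, a hedged usage sketch of the watchpoint API defined earlier in this file (illustration only; traced_var is a hypothetical kernel variable):

    extern uint32_t traced_var;         /* hypothetical watch target */

    /* Trap any 4-byte kernel-mode write to traced_var. */
    if (dbg_setup_watchpoint((db_expr_t)&traced_var, 4,
        HW_WATCHPOINT_W) != 0)
            db_printf("failed to arm watchpoint\n");
    /* Later, disarm it again. */
    dbg_remove_watchpoint((db_expr_t)&traced_var, 4);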
for (i = 0; i < dbg_watchpoint_num; ++i) { dbg_wb_write_reg(DBG_REG_BASE_WCR, i, 0); dbg_wb_write_reg(DBG_REG_BASE_WVR, i, 0); } for (i = 0; i < dbg_breakpoint_num; ++i) { dbg_wb_write_reg(DBG_REG_BASE_BCR, i, 0); dbg_wb_write_reg(DBG_REG_BASE_BVR, i, 0); } return (0); } void dbg_monitor_init(void) { int err; /* Fetch ARM Debug Architecture model */ dbg_model = dbg_get_debug_model(); if (!dbg_arch_supported()) { db_printf("ARM Debug Architecture not supported\n"); return; } if (bootverbose) { db_printf("ARM Debug Architecture %s\n", (dbg_model == ID_DFR0_CP_DEBUG_M_V6) ? "v6" : (dbg_model == ID_DFR0_CP_DEBUG_M_V6_1) ? "v6.1" : (dbg_model == ID_DFR0_CP_DEBUG_M_V7) ? "v7" : (dbg_model == ID_DFR0_CP_DEBUG_M_V7_1) ? "v7.1" : "unknown"); } /* Do we have an OS Save and Restore mechanism? */ dbg_ossr = dbg_get_ossr(); /* Find out how many breakpoints and watchpoints we can use */ dbg_watchpoint_num = dbg_get_wrp_num(); dbg_breakpoint_num = dgb_get_brp_num(); if (bootverbose) { db_printf("%d watchpoints and %d breakpoints supported\n", dbg_watchpoint_num, dbg_breakpoint_num); } err = dbg_reset_state(); if (err == 0) { err = dbg_enable_monitor(); if (err == 0) { atomic_set_int(&dbg_capable_var, 1); return; } } db_printf("HW Breakpoints/Watchpoints not enabled on CPU%d\n", PCPU_GET(cpuid)); } CTASSERT(sizeof(struct dbreg) == sizeof(((struct pcpu *)NULL)->pc_dbreg)); void dbg_monitor_init_secondary(void) { u_int cpuid; int err; /* * This flag is set on the primary CPU * and its meaning is valid for other CPUs too. */ if (!dbg_capable()) return; cpuid = PCPU_GET(cpuid); err = dbg_reset_state(); if (err != 0) { /* * Something is very wrong. * WPs/BPs will not work correctly on this CPU. */ KASSERT(0, ("%s: Failed to reset Debug Architecture " "state on CPU%d", __func__, cpuid)); /* Disable HW debug capabilities for all CPUs */ atomic_set_int(&dbg_capable_var, 0); return; } err = dbg_enable_monitor(); if (err != 0) { KASSERT(0, ("%s: Failed to enable Debug Monitor" " on CPU%d", __func__, cpuid)); atomic_set_int(&dbg_capable_var, 0); } } void dbg_resume_dbreg(void) { struct dbreg *d; u_int i; /* * This flag is set on the primary CPU * and its meaning is valid for other CPUs too. */ if (!dbg_capable()) return; atomic_thread_fence_acq(); switch (PCPU_GET(dbreg_cmd)) { case PC_DBREG_CMD_LOAD: d = (struct dbreg *)PCPU_PTR(dbreg); /* Restore watchpoints */ for (i = 0; i < dbg_watchpoint_num; i++) { dbg_wb_write_reg(DBG_REG_BASE_WVR, i, d->dbg_wvr[i]); dbg_wb_write_reg(DBG_REG_BASE_WCR, i, d->dbg_wcr[i]); } PCPU_SET(dbreg_cmd, PC_DBREG_CMD_NONE); break; } } Index: head/sys/arm/arm/disassem.c =================================================================== --- head/sys/arm/arm/disassem.c (revision 368140) +++ head/sys/arm/arm/disassem.c (revision 368141) @@ -1,692 +1,690 @@ /* $NetBSD: disassem.c,v 1.14 2003/03/27 16:58:36 mycroft Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1996 Mark Brinicombe. * Copyright (c) 1996 Brini. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * db_disasm.c * * Kernel disassembler * * Created : 10/02/96 * * Structured after the sparc/sparc/db_disasm.c by David S. Miller & * Paul Kranenburg * * This code is not complete. Not all instructions are disassembled. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include /* * General instruction format * * insn[cc][mod] [operands] * * Those fields with an uppercase format code indicate that the field * follows directly after the instruction before the separator i.e. * they modify the instruction rather than just being an operand to * the instruction. The only exception is the writeback flag which * follows an operand. * * * 2 - print Operand 2 of a data processing instruction * d - destination register (bits 12-15) * n - n register (bits 16-19) * s - s register (bits 8-11) * o - indirect register rn (bits 16-19) (used by swap) * m - m register (bits 0-3) * a - address operand of ldr/str instruction * l - register list for ldm/stm instruction * f - 1st fp operand (register) (bits 12-14) * g - 2nd fp operand (register) (bits 16-18) * h - 3rd fp operand (register/immediate) (bits 0-4) * b - branch address * t - thumb branch address (bits 24, 0-23) * k - breakpoint comment (bits 0-3, 8-19) * X - block transfer type * Y - block transfer type (r13 base) * c - comment field bits(0-23) * p - saved or current status register * F - PSR transfer fields * D - destination-is-r15 (P) flag on TST, TEQ, CMP, CMN * L - co-processor transfer size * S - set status flag * P - fp precision * Q - fp precision (for ldf/stf) * R - fp rounding * v - co-processor data transfer registers + addressing mode * W - writeback flag * x - instruction in hex * # - co-processor number * y - co-processor data processing registers * z - co-processor register transfer registers */ struct arm32_insn { u_int mask; u_int pattern; char* name; char* format; }; static const struct arm32_insn arm32_i[] = { { 0x0fffffff, 0x0ff00000, "imb", "c" }, /* Before swi */ { 0x0fffffff, 0x0ff00001, "imbrange", "c" }, /* Before swi */ { 0x0f000000, 0x0f000000, "swi", "c" }, { 0xfe000000, 0xfa000000, "blx", "t" }, /* Before b and bl */ { 0x0f000000, 0x0a000000, "b", "b" }, { 0x0f000000, 0x0b000000, "bl", "b" }, { 0x0fe000f0, 0x00000090, "mul", "Snms" }, { 0x0fe000f0, 0x00200090, "mla", "Snmsd" }, { 0x0fe000f0, 0x00800090, "umull", "Sdnms" }, { 0x0fe000f0, 0x00c00090, "smull", "Sdnms" }, { 0x0fe000f0, 0x00a00090, "umlal", "Sdnms" }, {
0x0fe000f0, 0x00e00090, "smlal", "Sdnms" }, { 0x0d700000, 0x04200000, "strt", "daW" }, { 0x0d700000, 0x04300000, "ldrt", "daW" }, { 0x0d700000, 0x04600000, "strbt", "daW" }, { 0x0d700000, 0x04700000, "ldrbt", "daW" }, { 0x0c500000, 0x04000000, "str", "daW" }, { 0x0c500000, 0x04100000, "ldr", "daW" }, { 0x0c500000, 0x04400000, "strb", "daW" }, { 0x0c500000, 0x04500000, "ldrb", "daW" }, -#if __ARM_ARCH >= 6 { 0x0fff0ff0, 0x06bf0fb0, "rev16", "dm" }, { 0xffffffff, 0xf57ff01f, "clrex", "c" }, { 0x0ff00ff0, 0x01800f90, "strex", "dmo" }, { 0x0ff00fff, 0x01900f9f, "ldrex", "do" }, { 0x0ff00ff0, 0x01a00f90, "strexd", "dmo" }, { 0x0ff00fff, 0x01b00f9f, "ldrexd", "do" }, { 0x0ff00ff0, 0x01c00f90, "strexb", "dmo" }, { 0x0ff00fff, 0x01d00f9f, "ldrexb", "do" }, { 0x0ff00ff0, 0x01e00f90, "strexh", "dmo" }, { 0x0ff00fff, 0x01f00f9f, "ldrexh", "do" }, -#endif { 0x0e1f0000, 0x080d0000, "stm", "YnWl" },/* separate out r13 base */ { 0x0e1f0000, 0x081d0000, "ldm", "YnWl" },/* separate out r13 base */ { 0x0e100000, 0x08000000, "stm", "XnWl" }, { 0x0e100000, 0x08100000, "ldm", "XnWl" }, { 0x0e1000f0, 0x00100090, "ldrb", "de" }, { 0x0e1000f0, 0x00000090, "strb", "de" }, { 0x0e1000f0, 0x001000d0, "ldrsb", "de" }, { 0x0e1000f0, 0x001000b0, "ldrh", "de" }, { 0x0e1000f0, 0x000000b0, "strh", "de" }, { 0x0e1000f0, 0x001000f0, "ldrsh", "de" }, { 0x0f200090, 0x00200090, "und", "x" }, /* Before data processing */ { 0x0e1000d0, 0x000000d0, "und", "x" }, /* Before data processing */ { 0x0ff00ff0, 0x01000090, "swp", "dmo" }, { 0x0ff00ff0, 0x01400090, "swpb", "dmo" }, { 0x0fbf0fff, 0x010f0000, "mrs", "dp" }, /* Before data processing */ { 0x0fb0fff0, 0x0120f000, "msr", "pFm" },/* Before data processing */ { 0x0fb0f000, 0x0320f000, "msr", "pF2" },/* Before data processing */ { 0x0ffffff0, 0x012fff10, "bx", "m" }, { 0x0fff0ff0, 0x016f0f10, "clz", "dm" }, { 0x0ffffff0, 0x012fff30, "blx", "m" }, { 0xfff000f0, 0xe1200070, "bkpt", "k" }, { 0x0de00000, 0x00000000, "and", "Sdn2" }, { 0x0de00000, 0x00200000, "eor", "Sdn2" }, { 0x0de00000, 0x00400000, "sub", "Sdn2" }, { 0x0de00000, 0x00600000, "rsb", "Sdn2" }, { 0x0de00000, 0x00800000, "add", "Sdn2" }, { 0x0de00000, 0x00a00000, "adc", "Sdn2" }, { 0x0de00000, 0x00c00000, "sbc", "Sdn2" }, { 0x0de00000, 0x00e00000, "rsc", "Sdn2" }, { 0x0df00000, 0x01100000, "tst", "Dn2" }, { 0x0df00000, 0x01300000, "teq", "Dn2" }, { 0x0de00000, 0x01400000, "cmp", "Dn2" }, { 0x0de00000, 0x01600000, "cmn", "Dn2" }, { 0x0de00000, 0x01800000, "orr", "Sdn2" }, { 0x0de00000, 0x01a00000, "mov", "Sd2" }, { 0x0de00000, 0x01c00000, "bic", "Sdn2" }, { 0x0de00000, 0x01e00000, "mvn", "Sd2" }, { 0x0ff08f10, 0x0e000100, "adf", "PRfgh" }, { 0x0ff08f10, 0x0e100100, "muf", "PRfgh" }, { 0x0ff08f10, 0x0e200100, "suf", "PRfgh" }, { 0x0ff08f10, 0x0e300100, "rsf", "PRfgh" }, { 0x0ff08f10, 0x0e400100, "dvf", "PRfgh" }, { 0x0ff08f10, 0x0e500100, "rdf", "PRfgh" }, { 0x0ff08f10, 0x0e600100, "pow", "PRfgh" }, { 0x0ff08f10, 0x0e700100, "rpw", "PRfgh" }, { 0x0ff08f10, 0x0e800100, "rmf", "PRfgh" }, { 0x0ff08f10, 0x0e900100, "fml", "PRfgh" }, { 0x0ff08f10, 0x0ea00100, "fdv", "PRfgh" }, { 0x0ff08f10, 0x0eb00100, "frd", "PRfgh" }, { 0x0ff08f10, 0x0ec00100, "pol", "PRfgh" }, { 0x0f008f10, 0x0e000100, "fpbop", "PRfgh" }, { 0x0ff08f10, 0x0e008100, "mvf", "PRfh" }, { 0x0ff08f10, 0x0e108100, "mnf", "PRfh" }, { 0x0ff08f10, 0x0e208100, "abs", "PRfh" }, { 0x0ff08f10, 0x0e308100, "rnd", "PRfh" }, { 0x0ff08f10, 0x0e408100, "sqt", "PRfh" }, { 0x0ff08f10, 0x0e508100, "log", "PRfh" }, { 0x0ff08f10, 0x0e608100, "lgn", "PRfh" }, { 0x0ff08f10, 
0x0e708100, "exp", "PRfh" }, { 0x0ff08f10, 0x0e808100, "sin", "PRfh" }, { 0x0ff08f10, 0x0e908100, "cos", "PRfh" }, { 0x0ff08f10, 0x0ea08100, "tan", "PRfh" }, { 0x0ff08f10, 0x0eb08100, "asn", "PRfh" }, { 0x0ff08f10, 0x0ec08100, "acs", "PRfh" }, { 0x0ff08f10, 0x0ed08100, "atn", "PRfh" }, { 0x0f008f10, 0x0e008100, "fpuop", "PRfh" }, { 0x0e100f00, 0x0c000100, "stf", "QLv" }, { 0x0e100f00, 0x0c100100, "ldf", "QLv" }, { 0x0ff00f10, 0x0e000110, "flt", "PRgd" }, { 0x0ff00f10, 0x0e100110, "fix", "PRdh" }, { 0x0ff00f10, 0x0e200110, "wfs", "d" }, { 0x0ff00f10, 0x0e300110, "rfs", "d" }, { 0x0ff00f10, 0x0e400110, "wfc", "d" }, { 0x0ff00f10, 0x0e500110, "rfc", "d" }, { 0x0ff0ff10, 0x0e90f110, "cmf", "PRgh" }, { 0x0ff0ff10, 0x0eb0f110, "cnf", "PRgh" }, { 0x0ff0ff10, 0x0ed0f110, "cmfe", "PRgh" }, { 0x0ff0ff10, 0x0ef0f110, "cnfe", "PRgh" }, { 0xff100010, 0xfe000010, "mcr2", "#z" }, { 0x0f100010, 0x0e000010, "mcr", "#z" }, { 0xff100010, 0xfe100010, "mrc2", "#z" }, { 0x0f100010, 0x0e100010, "mrc", "#z" }, { 0xff000010, 0xfe000000, "cdp2", "#y" }, { 0x0f000010, 0x0e000000, "cdp", "#y" }, { 0xfe100090, 0xfc100000, "ldc2", "L#v" }, { 0x0e100090, 0x0c100000, "ldc", "L#v" }, { 0xfe100090, 0xfc000000, "stc2", "L#v" }, { 0x0e100090, 0x0c000000, "stc", "L#v" }, { 0x00000000, 0x00000000, NULL, NULL } }; static char const arm32_insn_conditions[][4] = { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", "hi", "ls", "ge", "lt", "gt", "le", "", "nv" }; static char const insn_block_transfers[][4] = { "da", "ia", "db", "ib" }; static char const insn_stack_block_transfers[][4] = { "ed", "ea", "fd", "fa" }; static char const op_shifts[][4] = { "lsl", "lsr", "asr", "ror" }; static char const insn_fpa_rounding[][2] = { "", "p", "m", "z" }; static char const insn_fpa_precision[][2] = { "s", "d", "e", "p" }; static char const insn_fpaconstants[][8] = { "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0" }; #define insn_condition(x) arm32_insn_conditions[(x >> 28) & 0x0f] #define insn_blktrans(x) insn_block_transfers[(x >> 23) & 3] #define insn_stkblktrans(x) insn_stack_block_transfers[(x >> 23) & 3] #define op2_shift(x) op_shifts[(x >> 5) & 3] #define insn_fparnd(x) insn_fpa_rounding[(x >> 5) & 0x03] #define insn_fpaprec(x) insn_fpa_precision[(((x >> 18) & 2)|(x >> 7)) & 1] #define insn_fpaprect(x) insn_fpa_precision[(((x >> 21) & 2)|(x >> 15)) & 1] #define insn_fpaimm(x) insn_fpaconstants[x & 0x07] /* Local prototypes */ static void disasm_register_shift(const disasm_interface_t *di, u_int insn); static void disasm_print_reglist(const disasm_interface_t *di, u_int insn); static void disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn, u_int loc); static void disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn, u_int loc); static void disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn, u_int loc); static u_int disassemble_readword(u_int address); static void disassemble_printaddr(u_int address); vm_offset_t disasm(const disasm_interface_t *di, vm_offset_t loc, int altfmt) { const struct arm32_insn *i_ptr = arm32_i; u_int insn; int matchp; int branch; char* f_ptr; int fmt; fmt = 0; matchp = 0; insn = di->di_readword(loc); /* di->di_printf("loc=%08x insn=%08x : ", loc, insn);*/ while (i_ptr->name) { if ((insn & i_ptr->mask) == i_ptr->pattern) { matchp = 1; break; } i_ptr++; } if (!matchp) { di->di_printf("und%s\t%08x\n", insn_condition(insn), insn); return(loc + INSN_SIZE); } /* If instruction forces condition code, don't print it. 
*/ if ((i_ptr->mask & 0xf0000000) == 0xf0000000) di->di_printf("%s", i_ptr->name); else di->di_printf("%s%s", i_ptr->name, insn_condition(insn)); f_ptr = i_ptr->format; /* Insert tab if there are no instruction modifiers */ if (*(f_ptr) < 'A' || *(f_ptr) > 'Z') { ++fmt; di->di_printf("\t"); } while (*f_ptr) { switch (*f_ptr) { /* 2 - print Operand 2 of a data processing instruction */ case '2': if (insn & 0x02000000) { int rotate= ((insn >> 7) & 0x1e); di->di_printf("#0x%08x", (insn & 0xff) << (32 - rotate) | (insn & 0xff) >> rotate); } else { disasm_register_shift(di, insn); } break; /* d - destination register (bits 12-15) */ case 'd': di->di_printf("r%d", ((insn >> 12) & 0x0f)); break; /* D - insert 'p' if Rd is R15 */ case 'D': if (((insn >> 12) & 0x0f) == 15) di->di_printf("p"); break; /* n - n register (bits 16-19) */ case 'n': di->di_printf("r%d", ((insn >> 16) & 0x0f)); break; /* s - s register (bits 8-11) */ case 's': di->di_printf("r%d", ((insn >> 8) & 0x0f)); break; /* o - indirect register rn (bits 16-19) (used by swap) */ case 'o': di->di_printf("[r%d]", ((insn >> 16) & 0x0f)); break; /* m - m register (bits 0-4) */ case 'm': di->di_printf("r%d", ((insn >> 0) & 0x0f)); break; /* a - address operand of ldr/str instruction */ case 'a': disasm_insn_ldrstr(di, insn, loc); break; /* e - address operand of ldrh/strh instruction */ case 'e': disasm_insn_ldrhstrh(di, insn, loc); break; /* l - register list for ldm/stm instruction */ case 'l': disasm_print_reglist(di, insn); break; /* f - 1st fp operand (register) (bits 12-14) */ case 'f': di->di_printf("f%d", (insn >> 12) & 7); break; /* g - 2nd fp operand (register) (bits 16-18) */ case 'g': di->di_printf("f%d", (insn >> 16) & 7); break; /* h - 3rd fp operand (register/immediate) (bits 0-4) */ case 'h': if (insn & (1 << 3)) di->di_printf("#%s", insn_fpaimm(insn)); else di->di_printf("f%d", insn & 7); break; /* b - branch address */ case 'b': branch = ((insn << 2) & 0x03ffffff); if (branch & 0x02000000) branch |= 0xfc000000; di->di_printaddr(loc + 8 + branch); break; /* t - blx address */ case 't': branch = ((insn << 2) & 0x03ffffff) | (insn >> 23 & 0x00000002); if (branch & 0x02000000) branch |= 0xfc000000; di->di_printaddr(loc + 8 + branch); break; /* X - block transfer type */ case 'X': di->di_printf("%s", insn_blktrans(insn)); break; /* Y - block transfer type (r13 base) */ case 'Y': di->di_printf("%s", insn_stkblktrans(insn)); break; /* c - comment field bits(0-23) */ case 'c': di->di_printf("0x%08x", (insn & 0x00ffffff)); break; /* k - breakpoint comment (bits 0-3, 8-19) */ case 'k': di->di_printf("0x%04x", (insn & 0x000fff00) >> 4 | (insn & 0x0000000f)); break; /* p - saved or current status register */ case 'p': if (insn & 0x00400000) di->di_printf("spsr"); else di->di_printf("cpsr"); break; /* F - PSR transfer fields */ case 'F': di->di_printf("_"); if (insn & (1 << 16)) di->di_printf("c"); if (insn & (1 << 17)) di->di_printf("x"); if (insn & (1 << 18)) di->di_printf("s"); if (insn & (1 << 19)) di->di_printf("f"); break; /* B - byte transfer flag */ case 'B': if (insn & 0x00400000) di->di_printf("b"); break; /* L - co-processor transfer size */ case 'L': if (insn & (1 << 22)) di->di_printf("l"); break; /* S - set status flag */ case 'S': if (insn & 0x00100000) di->di_printf("s"); break; /* P - fp precision */ case 'P': di->di_printf("%s", insn_fpaprec(insn)); break; /* Q - fp precision (for ldf/stf) */ case 'Q': break; /* R - fp rounding */ case 'R': di->di_printf("%s", insn_fparnd(insn)); break; /* W - writeback flag */ case 
'W': if (insn & (1 << 21)) di->di_printf("!"); break; /* # - co-processor number */ case '#': di->di_printf("p%d", (insn >> 8) & 0x0f); break; /* v - co-processor data transfer registers+addressing mode */ case 'v': disasm_insn_ldcstc(di, insn, loc); break; /* x - instruction in hex */ case 'x': di->di_printf("0x%08x", insn); break; /* y - co-processor data processing registers */ case 'y': di->di_printf("%d, ", (insn >> 20) & 0x0f); di->di_printf("c%d, c%d, c%d", (insn >> 12) & 0x0f, (insn >> 16) & 0x0f, insn & 0x0f); di->di_printf(", %d", (insn >> 5) & 0x07); break; /* z - co-processor register transfer registers */ case 'z': di->di_printf("%d, ", (insn >> 21) & 0x07); di->di_printf("r%d, c%d, c%d, %d", (insn >> 12) & 0x0f, (insn >> 16) & 0x0f, insn & 0x0f, (insn >> 5) & 0x07); /* if (((insn >> 5) & 0x07) != 0) di->di_printf(", %d", (insn >> 5) & 0x07);*/ break; default: di->di_printf("[%c - unknown]", *f_ptr); break; } if (*(f_ptr+1) >= 'A' && *(f_ptr+1) <= 'Z') ++f_ptr; else if (*(++f_ptr)) { ++fmt; if (fmt == 1) di->di_printf("\t"); else di->di_printf(", "); } } di->di_printf("\n"); return(loc + INSN_SIZE); } static void disasm_register_shift(const disasm_interface_t *di, u_int insn) { di->di_printf("r%d", (insn & 0x0f)); if ((insn & 0x00000ff0) == 0) ; else if ((insn & 0x00000ff0) == 0x00000060) di->di_printf(", rrx"); else { if (insn & 0x10) di->di_printf(", %s r%d", op2_shift(insn), (insn >> 8) & 0x0f); else di->di_printf(", %s #%d", op2_shift(insn), (insn >> 7) & 0x1f); } } static void disasm_print_reglist(const disasm_interface_t *di, u_int insn) { int loop; int start; int comma; di->di_printf("{"); start = -1; comma = 0; for (loop = 0; loop < 17; ++loop) { if (start != -1) { if (loop == 16 || !(insn & (1 << loop))) { if (comma) di->di_printf(", "); else comma = 1; if (start == loop - 1) di->di_printf("r%d", start); else di->di_printf("r%d-r%d", start, loop - 1); start = -1; } } else { if (insn & (1 << loop)) start = loop; } } di->di_printf("}"); if (insn & (1 << 22)) di->di_printf("^"); } static void disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn, u_int loc) { int offset; offset = insn & 0xfff; if ((insn & 0x032f0000) == 0x010f0000) { /* rA = pc, immediate index */ if (insn & 0x00800000) loc += offset; else loc -= offset; di->di_printaddr(loc + 8); } else { di->di_printf("[r%d", (insn >> 16) & 0x0f); if ((insn & 0x03000fff) != 0x01000000) { di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]"); if (!(insn & 0x00800000)) di->di_printf("-"); if (insn & (1 << 25)) disasm_register_shift(di, insn); else di->di_printf("#0x%03x", offset); } if (insn & (1 << 24)) di->di_printf("]"); } } static void disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn, u_int loc) { int offset; offset = ((insn & 0xf00) >> 4) | (insn & 0xf); if ((insn & 0x004f0000) == 0x004f0000) { /* rA = pc, immediate index */ if (insn & 0x00800000) loc += offset; else loc -= offset; di->di_printaddr(loc + 8); } else { di->di_printf("[r%d", (insn >> 16) & 0x0f); if ((insn & 0x01400f0f) != 0x01400000) { di->di_printf("%s, ", (insn & (1 << 24)) ? 
"" : "]"); if (!(insn & 0x00800000)) di->di_printf("-"); if (insn & (1 << 22)) di->di_printf("#0x%02x", offset); else di->di_printf("r%d", (insn & 0x0f)); } if (insn & (1 << 24)) di->di_printf("]"); } } static void disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn, u_int loc) { if (((insn >> 8) & 0xf) == 1) di->di_printf("f%d, ", (insn >> 12) & 0x07); else di->di_printf("c%d, ", (insn >> 12) & 0x0f); di->di_printf("[r%d", (insn >> 16) & 0x0f); di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]"); if (!(insn & (1 << 23))) di->di_printf("-"); di->di_printf("#0x%03x", (insn & 0xff) << 2); if (insn & (1 << 24)) di->di_printf("]"); if (insn & (1 << 21)) di->di_printf("!"); } static u_int disassemble_readword(u_int address) { return(*((u_int *)address)); } static void disassemble_printaddr(u_int address) { printf("0x%08x", address); } static const disasm_interface_t disassemble_di = { disassemble_readword, disassemble_printaddr, db_printf }; void disassemble(u_int address) { (void)disasm(&disassemble_di, address, 0); } /* End of disassem.c */ Index: head/sys/arm/arm/elf_machdep.c =================================================================== --- head/sys/arm/arm/elf_machdep.c (revision 368140) +++ head/sys/arm/arm/elf_machdep.c (revision 368141) @@ -1,344 +1,336 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 1996-1998 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include "opt_ddb.h" /* for OPT_DDB */ #include "opt_global.h" /* for OPT_KDTRACE_HOOKS */ #include "opt_stack.h" /* for OPT_STACK */ static boolean_t elf32_arm_abi_supported(struct image_params *, int32_t *, uint32_t *); u_long elf_hwcap; u_long elf_hwcap2; struct sysentvec elf32_freebsd_sysvec = { .sv_size = SYS_MAXSYSCALL, .sv_table = sysent, .sv_transtrap = NULL, .sv_fixup = __elfN(freebsd_fixup), .sv_sendsig = sendsig, .sv_sigcode = sigcode, .sv_szsigcode = &szsigcode, .sv_name = "FreeBSD ELF32", .sv_coredump = __elfN(coredump), .sv_imgact_try = NULL, .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_stackprot = VM_PROT_ALL, .sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = -#if __ARM_ARCH >= 6 SV_ASLR | SV_SHP | SV_TIMEKEEP | SV_RNG_SEED_VER | -#endif SV_ABI_FREEBSD | SV_ILP32 | SV_ASLR, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &elf_hwcap, .sv_hwcap2 = &elf_hwcap2, }; INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec); static Elf32_Brandinfo freebsd_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_ARM, .compat_3_brand = "FreeBSD", .emul_path = NULL, .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf32_freebsd_sysvec, .interp_newpath = NULL, .brand_note = &elf32_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported= elf32_arm_abi_supported, }; SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t) elf32_insert_brand_entry, &freebsd_brand_info); static boolean_t elf32_arm_abi_supported(struct image_params *imgp, int32_t *osrel __unused, uint32_t *fctl0 __unused) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; /* * When configured for EABI, FreeBSD supports EABI versions 4 and 5. */ if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) { if (bootverbose) uprintf("Attempting to execute non-EABI binary (rev %d) image %s", EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname); return (FALSE); } return (TRUE); } void elf32_dump_thread(struct thread *td, void *dst, size_t *off) { #ifdef VFP mcontext_vfp_t vfp; if (dst != NULL) { get_vfpcontext(td, &vfp); *off = elf32_populate_note(NT_ARM_VFP, &vfp, dst, sizeof(vfp), NULL); } else *off = elf32_populate_note(NT_ARM_VFP, NULL, NULL, sizeof(vfp), NULL); #endif } bool elf_is_ifunc_reloc(Elf_Size r_info __unused) { return (false); } /* * It is possible for the compiler to emit relocations for unaligned data. * We handle this situation with these inlines. */ #define RELOC_ALIGNED_P(x) \ (((uintptr_t)(x) & (sizeof(void *) - 1)) == 0) static __inline Elf_Addr load_ptr(Elf_Addr *where) { Elf_Addr res; if (RELOC_ALIGNED_P(where)) return *where; memcpy(&res, where, sizeof(res)); return (res); } static __inline void store_ptr(Elf_Addr *where, Elf_Addr val) { if (RELOC_ALIGNED_P(where)) *where = val; else memcpy(where, &val, sizeof(val)); } #undef RELOC_ALIGNED_P /* Process one elf relocation with addend.
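A hedged numeric illustration of the local R_ARM_RELATIVE case handled below (values are made up, and it assumes elf_relocaddr() returns its argument unchanged for this address; the real code also skips the store when the value is already correct):

    /* Module loaded at relocbase 0xd0700000; *where holds addend 0x1234. */
    Elf_Addr *where = (Elf_Addr *)(relocbase + rel->r_offset);
    Elf_Addr addend = load_ptr(where);          /* 0x00001234 */
    store_ptr(where, elf_relocaddr(lf, relocbase + addend));
                                                /* *where is now 0xd0701234 */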
*/ static int elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, int local, elf_lookup_fn lookup) { Elf_Addr *where; Elf_Addr addr; Elf_Addr addend; Elf_Word rtype, symidx; const Elf_Rel *rel; const Elf_Rela *rela; int error; switch (type) { case ELF_RELOC_REL: rel = (const Elf_Rel *)data; where = (Elf_Addr *) (relocbase + rel->r_offset); addend = load_ptr(where); rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("unknown reloc type %d\n", type); } if (local) { if (rtype == R_ARM_RELATIVE) { /* A + B */ addr = elf_relocaddr(lf, relocbase + addend); if (load_ptr(where) != addr) store_ptr(where, addr); } return (0); } switch (rtype) { case R_ARM_NONE: /* none */ break; case R_ARM_ABS32: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); store_ptr(where, addr + load_ptr(where)); break; case R_ARM_COPY: /* none */ /* * There shouldn't be copy relocations in kernel * objects. */ printf("kldload: unexpected R_COPY relocation, " "symbol index %d\n", symidx); return (-1); break; case R_ARM_JUMP_SLOT: error = lookup(lf, symidx, 1, &addr); if (error == 0) { store_ptr(where, addr); return (0); } return (-1); case R_ARM_RELATIVE: break; default: printf("kldload: unexpected relocation type %d, " "symbol index %d\n", rtype, symidx); return (-1); } return(0); } int elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); } int elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup)); } int elf_cpu_load_file(linker_file_t lf) { /* * The pmap code does not do an icache sync upon establishing executable * mappings in the kernel pmap. It's an optimization based on the fact * that kernel memory allocations always have EXECUTABLE protection even * when the memory isn't going to hold executable code. The only time * kernel memory holding instructions does need a sync is after loading * a kernel module, and that's when this function gets called. * * This syncs data and instruction caches after loading a module. We * don't worry about the kernel itself (lf->id is 1) as locore.S did * that on entry. Even if data cache maintenance was done by IO code, * the relocation fixup process creates dirty cache entries that we must * write back before doing icache sync. The instruction cache sync also * invalidates the branch predictor cache on platforms that have one. */ if (lf->id == 1) return (0); -#if __ARM_ARCH >= 6 dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size); icache_inv_all(); -#else - cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size); - cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size); - cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size); -#endif #if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK) /* * Inform the stack(9) code of the new module, so it can acquire its * per-module unwind data. 
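A condensed, hedged restatement of the ordering that matters in elf_cpu_load_file() above: the data-cache write-back must complete before the instruction cache is invalidated, or the I-cache could refill from stale memory.

    /* After the relocation fixups have dirtied lf->address..+lf->size: */
    dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size); /* write back first */
    icache_inv_all();    /* then drop stale I-cache and branch-predictor state */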
*/ unwind_module_loaded(lf); #endif return (0); } int elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused) { return (0); } int elf_cpu_unload_file(linker_file_t lf) { #if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK) /* Inform the stack(9) code that this module is gone. */ unwind_module_unloaded(lf); #endif return (0); } Index: head/sys/arm/arm/exception.S =================================================================== --- head/sys/arm/arm/exception.S (revision 368140) +++ head/sys/arm/arm/exception.S (revision 368141) @@ -1,503 +1,418 @@ /* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * exception.S * * Low level handlers for exception vectors * * Created : 24/09/94 * * Based on kate/display/abort.s * */ #include "assym.inc" #include #include #include #include __FBSDID("$FreeBSD$"); #ifdef KDTRACE_HOOKS .bss .align 4 .global _C_LABEL(dtrace_invop_jump_addr) _C_LABEL(dtrace_invop_jump_addr): .word 0 .word 0 #endif .text .align 2 /* * ASM macros for pushing and pulling trapframes from the stack * * These macros are used to handle the irqframe and trapframe structures * defined above. */ /* * PUSHFRAME - macro to push a trap frame on the stack in the current mode * Since the current mode is used, the SVC lr field is not defined. 
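A hedged C-level view of the frame PUSHFRAME lays down (field names follow the arm trapframe convention; this sketch is inferred from the macro's offsets, for orientation only):

    struct trapframe_sketch {      /* sp points here after PUSHFRAME */
        uint32_t tf_spsr;          /* pushed last */
        uint32_t tf_r[13];         /* r0-r12 */
        uint32_t tf_usr_sp;        /* banked user r13 (stmia ^) */
        uint32_t tf_usr_lr;        /* banked user r14 (stmia ^) */
        uint32_t tf_svc_sp;        /* not filled in by PUSHFRAME */
        uint32_t tf_svc_lr;        /* not filled in by PUSHFRAME */
        uint32_t tf_pc;            /* return address (the pushed lr) */
        uint32_t tf_pad;           /* keeps sp 8-byte aligned */
    };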
- * - * NOTE: r13 and r14 are stored separately as a work around for the - * SA110 rev 2 STM^ bug */ -#if __ARM_ARCH < 6 #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ - str r0, [sp, #-4]!; \ - ldr r0, =ARM_RAS_START; \ - mov r1, #0; \ - str r1, [r0]; \ - mov r1, #0xffffffff; \ - str r1, [r0, #4]; -#else -#define PUSHFRAME \ - sub sp, sp, #4; /* Align the stack */ \ - str lr, [sp, #-4]!; /* Push the return address */ \ - sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ - stmia sp, {r0-r12}; /* Push the user mode registers */ \ - add r0, sp, #(4*13); /* Adjust the stack pointer */ \ - stmia r0, {r13-r14}^; /* Push the user mode registers */ \ - mov r0, r0; /* NOP for previous instruction */ \ - mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; -#endif /* * PULLFRAME - macro to pull a trap frame from the stack in the current mode * Since the current mode is used, the SVC lr field is ignored. */ -#if __ARM_ARCH < 6 #define PULLFRAME \ - ldr r0, [sp], #4; /* Get the SPSR from stack */ \ - msr spsr_fsxc, r0; \ - ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ - mov r0, r0; /* NOP for previous instruction */ \ - add sp, sp, #(4*17); /* Adjust the stack pointer */ \ - ldr lr, [sp], #4; /* Pull the return address */ \ - add sp, sp, #4 /* Align the stack */ -#else -#define PULLFRAME \ ldr r0, [sp], #4 ; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ -#endif /* * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode * This should only be used if the processor is not currently in SVC32 * mode. The processor mode is switched to SVC mode and the trap frame is * stored. The SVC lr field is used to store the previous value of * lr in SVC mode. 
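The mode switch at the heart of PUSHFRAMEINSVC, restated as hedged pseudo-C (read_cpsr() and write_cpsr_c() are hypothetical helpers standing in for the mrs/msr instructions):

    uint32_t cpsr = read_cpsr();                /* mrs r2, cpsr */
    cpsr = (cpsr & ~PSR_MODE) | PSR_SVC32_MODE; /* bic + orr */
    write_cpsr_c(cpsr);                         /* msr cpsr_c, r2 */
    /* Execution continues here on the SVC32 stack. */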
- * - * NOTE: r13 and r14 are stored separately as a work around for the - * SA110 rev 2 STM^ bug */ -#if __ARM_ARCH < 6 #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ - mrs r2, cpsr; /* Get the CPSR */ \ - bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ - orr r2, r2, #(PSR_SVC32_MODE); \ - msr cpsr_c, r2; /* Punch into SVC mode */ \ - mov r2, sp; /* Save SVC sp */ \ - bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \ - sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ - /* and for dtrace to emulate push/pop */ \ - str r0, [sp, #-4]!; /* Push return address */ \ - str lr, [sp, #-4]!; /* Push SVC lr */ \ - str r2, [sp, #-4]!; /* Push SVC sp */ \ - msr spsr_fsxc, r3; /* Restore correct spsr */ \ - ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ - sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ - stmia sp, {r0-r12}; /* Push the user mode registers */ \ - add r0, sp, #(4*13); /* Adjust the stack pointer */ \ - stmia r0, {r13-r14}^; /* Push the user mode registers */ \ - mov r0, r0; /* NOP for previous instruction */ \ - ldr r5, =ARM_RAS_START; /* Check if there's any RAS */ \ - ldr r4, [r5, #4]; /* reset it to point at the */ \ - cmp r4, #0xffffffff; /* end of memory if necessary; */ \ - movne r1, #0xffffffff; /* leave value in r4 for later */ \ - strne r1, [r5, #4]; /* comparison against PC. */ \ - ldr r3, [r5]; /* Retrieve global RAS_START */ \ - cmp r3, #0; /* and reset it if non-zero. */ \ - movne r1, #0; /* If non-zero RAS_START and */ \ - strne r1, [r5]; /* PC was lower than RAS_END, */ \ - ldrne r1, [r0, #16]; /* adjust the saved PC so that */ \ - cmpne r4, r1; /* execution later resumes at */ \ - strhi r3, [r0, #16]; /* the RAS_START location. */ \ - mrs r0, spsr; \ - str r0, [sp, #-4]! -#else -#define PUSHFRAMEINSVC \ - stmdb sp, {r0-r3}; /* Save 4 registers */ \ - mov r0, lr; /* Save xxx32 r14 */ \ - mov r1, sp; /* Save xxx32 sp */ \ - mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte address */ \ sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ /* and for dtrace to emulate push/pop */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]! -#endif /* * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack * in SVC32 mode and restore the saved processor mode and PC. * This should be used when the SVC lr register needs to be restored on * exit.
*/ -#if __ARM_ARCH < 6 #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ - ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ - mov r0, r0; /* NOP for previous instruction */ \ - add sp, sp, #(4*15); /* Adjust the stack pointer */ \ - ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ -#else -#define PULLFRAMEFROMSVCANDEXIT \ - ldr r0, [sp], #4; /* Get the SPSR from stack */ \ - msr spsr_fsxc, r0; /* restore SPSR */ \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ -#endif /* * Unwind hints so we can unwind past functions that use * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order. * As the last thing we do is restore the stack pointer * we can ignore the padding at the end of struct trapframe. */ #define UNWINDSVCFRAME \ .save {r13-r15}; /* Restore sp, lr, pc */ \ .pad #(2*4); /* Skip user sp and lr */ \ .save {r0-r12}; /* Restore r0-r12 */ \ .pad #(4) /* Skip spsr */ #define DO_AST \ ldr r0, [sp]; /* Get the SPSR from stack */ \ mrs r4, cpsr; /* save CPSR */ \ orr r1, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r1; /* Disable interrupts */ \ and r0, r0, #(PSR_MODE); /* Returning to USR mode? */ \ teq r0, #(PSR_USR32_MODE); \ bne 2f; /* Nope, get out now */ \ bic r4, r4, #(PSR_I|PSR_F); \ 1: GET_CURTHREAD_PTR(r5); \ ldr r1, [r5, #(TD_FLAGS)]; \ and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED); \ teq r1, #0; \ beq 2f; /* Nope. Just bail */ \ msr cpsr_c, r4; /* Restore interrupts */ \ mov r0, sp; \ bl _C_LABEL(ast); /* ast(frame) */ \ orr r0, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r0; \ b 1b; \ 2: /* * Entry point for a Software Interrupt (SWI). * * The hardware switches to svc32 mode on a swi, so we're already on the * right stack; just build a trapframe and call the handler. */ ASENTRY_NP(swi_entry) PUSHFRAME /* Build the trapframe on the */ mov r0, sp /* svc32 stack, pass it to the */ bl _C_LABEL(swi_handler) /* swi handler. */ /* * The fork_trampoline() code in swtch.S arranges for the MI fork_exit() * to return to swi_exit here, to return to userland. The net effect is * that a newly created thread appears to return from a SWI just like * the parent thread that created it. */ ASEENTRY_NP(swi_exit) DO_AST /* Handle pending signals. */ PULLFRAME /* Deallocate trapframe. */ movs pc, lr /* Return to userland. */ STOP_UNWINDING /* Don't unwind into user mode. */ EEND(swi_exit) END(swi_entry) /* * Standard exception exit handler. * * This is used to return from all exceptions except SWI. It uses DO_AST and * PULLFRAMEFROMSVCANDEXIT and can only be called if the exception entry code * used PUSHFRAMEINSVC. * * If the return is to user mode, this uses DO_AST to deliver any pending * signals and/or handle TDF_NEEDRESCHED first. */ ASENTRY_NP(exception_exit) DO_AST /* Handle pending signals. */ PULLFRAMEFROMSVCANDEXIT /* Return. */ UNWINDSVCFRAME /* Special unwinding for exceptions. */ END(exception_exit) /* * Entry point for a Prefetch Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(prefetch_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. */ #endif sub lr, lr, #4 /* Adjust the lr.
Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ mov r1, #1 /* Type flag */ b _C_LABEL(abort_handler) END(prefetch_abort_entry) /* * Entry point for a Data Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(data_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. */ #endif sub lr, lr, #8 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Exception exit routine */ mov r0, sp /* Trapframe to the handler */ mov r1, #0 /* Type flag */ b _C_LABEL(abort_handler) END(data_abort_entry) /* * Entry point for an Undefined Instruction exception. * * The hardware switches to the undefined mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the undefined mode stack). */ ASENTRY_NP(undefined_entry) PUSHFRAMEINSVC /* mode stack, build trapframe there. */ mov r4, r0 /* R0 contains SPSR */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. pass frame */ ldr r2, [sp, #(TF_PC)] /* load pc */ #if __ARM_ARCH >= 7 tst r4, #(PSR_T) /* test if PSR_T */ subne r2, r2, #(THUMB_INSN_SIZE) subeq r2, r2, #(INSN_SIZE) #else sub r2, r2, #(INSN_SIZE) /* fix pc */ #endif str r2, [sp, #TF_PC] /* store pc */ #ifdef KDTRACE_HOOKS /* Check if dtrace is enabled */ ldr r1, =_C_LABEL(dtrace_invop_jump_addr) ldr r3, [r1] cmp r3, #0 beq undefinedinstruction and r4, r4, #(PSR_MODE) /* Mask out unneeded bits */ cmp r4, #(PSR_USR32_MODE) /* Check if we came from usermode */ beq undefinedinstruction ldr r4, [r2] /* load instruction */ ldr r1, =FBT_BREAKPOINT /* load fbt inv op */ cmp r1, r4 bne undefinedinstruction bx r3 /* call invop_jump_addr */ #endif b undefinedinstruction /* call standard handler */ END(undefined_entry) /* * Entry point for a normal IRQ. * * The hardware switches to the IRQ mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the IRQ mode stack). */ ASENTRY_NP(irq_entry) sub lr, lr, #4 /* Adjust the lr. Transition to svc32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b _C_LABEL(intr_irq_handler)/* trapframe to the handler. */ END(irq_entry) /* * Entry point for an FIQ interrupt. * * We don't currently support FIQ handlers very much. Something can * install itself in the FIQ vector using code (that may or may not work * these days) in fiq.c. If nobody does that and an FIQ happens, this * default handler just disables FIQs and otherwise ignores it. */ ASENTRY_NP(fiq_entry) mrs r8, cpsr /* FIQ handling isn't supported, */ bic r8, #(PSR_F) /* just disable FIQ and return. */ msr cpsr_c, r8 /* The r8 we trash here is the */ subs pc, lr, #4 /* banked FIQ-mode r8. */ END(fiq_entry) /* * Entry point for an Address Exception exception. * This is an arm26 exception that should never happen.
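The DO_AST macro above, restated as hedged pseudo-C (intr_restore() and intr_disable() are hypothetical stand-ins for the cpsr writes in the macro):

    /* Only on return to USR mode; interrupts are masked while checking. */
    while ((curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) != 0) {
        intr_restore();     /* re-enable interrupts */
        ast(frame);         /* may deliver signals or reschedule */
        intr_disable();     /* mask PSR_I|PSR_F again before re-checking */
    }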
*/ ASENTRY_NP(addr_exception_entry) mov r3, lr mrs r2, spsr mrs r1, cpsr adr r0, Laddr_exception_msg b _C_LABEL(panic) Laddr_exception_msg: .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n" .balign 4 END(addr_exception_entry) /* * Entry point for the system Reset vector. * This should never happen, so panic. */ ASENTRY_NP(reset_entry) mov r1, lr adr r0, Lreset_panicmsg b _C_LABEL(panic) /* NOTREACHED */ Lreset_panicmsg: .asciz "Reset vector called, LR = 0x%08x" .balign 4 END(reset_entry) /* * page0 and page0_data -- An image of the ARM vectors which is copied to * the ARM vectors page (high or low) as part of CPU initialization. The * code that does the copy assumes that page0_data holds one 32-bit word * of data for each of the predefined ARM vectors. It also assumes that * page0_data follows the vectors in page0, but other stuff can appear * between the two. We currently leave room between the two for some fiq * handler code to be copied in. */ .global _C_LABEL(page0), _C_LABEL(page0_data) _C_LABEL(page0): ldr pc, .Lreset_entry ldr pc, .Lundefined_entry ldr pc, .Lswi_entry ldr pc, .Lprefetch_abort_entry ldr pc, .Ldata_abort_entry ldr pc, .Laddr_exception_entry ldr pc, .Lirq_entry .fiqv: ldr pc, .Lfiq_entry .space 256 /* room for some fiq handler code */ _C_LABEL(page0_data): .Lreset_entry: .word reset_entry .Lundefined_entry: .word undefined_entry .Lswi_entry: .word swi_entry .Lprefetch_abort_entry: .word prefetch_abort_entry .Ldata_abort_entry: .word data_abort_entry .Laddr_exception_entry: .word addr_exception_entry .Lirq_entry: .word irq_entry .Lfiq_entry: .word fiq_entry /* * These items are used by the code in fiq.c to install what it calls the * "null" handler. It's actually our default vector entry that just jumps * to the default handler which just disables FIQs and returns. */ .global _C_LABEL(fiq_nullhandler_code), _C_LABEL(fiq_nullhandler_size) _C_LABEL(fiq_nullhandler_code): .word .fiqv _C_LABEL(fiq_nullhandler_size): .word 4 Index: head/sys/arm/arm/fiq.c =================================================================== --- head/sys/arm/arm/fiq.c (revision 368140) +++ head/sys/arm/arm/fiq.c (revision 368141) @@ -1,174 +1,166 @@ /* $NetBSD: fiq.c,v 1.5 2002/04/03 23:33:27 thorpej Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include TAILQ_HEAD(, fiqhandler) fiqhandler_stack = TAILQ_HEAD_INITIALIZER(fiqhandler_stack); extern char *fiq_nullhandler_code; extern uint32_t fiq_nullhandler_size; /* * fiq_installhandler: * * Actually install the FIQ handler down at the FIQ vector. * * The FIQ vector is fixed by the hardware definition as the * seventh 32-bit word in the vector page. * * Note: If the FIQ is invoked via an extra layer of * indirection, the actual FIQ code store lives in the * data segment, so there is no need to manipulate * the vector page's protection. */ static void fiq_installhandler(void *func, size_t size) { const uint32_t fiqvector = 7 * sizeof(uint32_t); -#if __ARM_ARCH < 6 && !defined(__ARM_FIQ_INDIRECT) - vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE); -#endif - memcpy((void *)(vector_page + fiqvector), func, size); - -#if __ARM_ARCH < 6 && !defined(__ARM_FIQ_INDIRECT) - vector_page_setprot(VM_PROT_READ); -#endif icache_sync((vm_offset_t) fiqvector, size); } /* * fiq_claim: * * Claim the FIQ vector. */ int fiq_claim(struct fiqhandler *fh) { struct fiqhandler *ofh; u_int oldirqstate; int error = 0; if (fh->fh_size > 0x100) return (EFBIG); oldirqstate = disable_interrupts(PSR_F); if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) { if ((ofh->fh_flags & FH_CANPUSH) == 0) { error = EBUSY; goto out; } /* Save the previous FIQ handler's registers. */ if (ofh->fh_regs != NULL) fiq_getregs(ofh->fh_regs); } /* Set FIQ mode registers to ours. */ if (fh->fh_regs != NULL) fiq_setregs(fh->fh_regs); TAILQ_INSERT_HEAD(&fiqhandler_stack, fh, fh_list); /* Now copy the actual handler into place. */ fiq_installhandler(fh->fh_func, fh->fh_size); /* Make sure FIQs are enabled when we return. */ oldirqstate &= ~PSR_F; out: restore_interrupts(oldirqstate); return (error); } /* * fiq_release: * * Release the FIQ vector. */ void fiq_release(struct fiqhandler *fh) { u_int oldirqstate; struct fiqhandler *ofh; oldirqstate = disable_interrupts(PSR_F); /* * If we are the currently active FIQ handler, then we * need to save our registers and pop the next one back * into the vector. */ if (fh == TAILQ_FIRST(&fiqhandler_stack)) { if (fh->fh_regs != NULL) fiq_getregs(fh->fh_regs); TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list); if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) { if (ofh->fh_regs != NULL) fiq_setregs(ofh->fh_regs); fiq_installhandler(ofh->fh_func, ofh->fh_size); } } else TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list); if (TAILQ_FIRST(&fiqhandler_stack) == NULL) { /* Copy the NULL handler back down into the vector. */ fiq_installhandler(fiq_nullhandler_code, fiq_nullhandler_size); /* Make sure FIQs are disabled when we return. 
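A hedged usage sketch for the FIQ stacking API above (illustration only; my_fiq_code, my_fiq_code_size and my_fiq_regs are hypothetical):

    struct fiqhandler fh = {
        .fh_func = my_fiq_code,          /* handler image to copy in */
        .fh_size = my_fiq_code_size,     /* must not exceed 0x100 */
        .fh_flags = FH_CANPUSH,          /* allow later claimants to stack */
        .fh_regs = &my_fiq_regs,         /* banked FIQ register values */
    };

    if (fiq_claim(&fh) != 0)
        printf("FIQ vector busy or handler too large\n");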
*/ oldirqstate |= PSR_F; } restore_interrupts(oldirqstate); } Index: head/sys/arm/arm/fusu.S =================================================================== --- head/sys/arm/arm/fusu.S (revision 368140) +++ head/sys/arm/arm/fusu.S (revision 368141) @@ -1,328 +1,313 @@ /* $NetBSD: fusu.S,v 1.10 2003/12/01 13:34:44 rearnsha Exp $ */ /*- * Copyright (c) 1996-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include "assym.inc" __FBSDID("$FreeBSD$"); .syntax unified -#if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) -#else -.Lcurpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB -#define GET_PCB(tmp) \ - ldr tmp, .Lcurpcb -#endif /* * casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, * uint32_t newval); */ ENTRY(casueword) EENTRY_NP(casueword32) stmfd sp!, {r4, r5, r6} ldr r4, =(VM_MAXUSER_ADDRESS-3) cmp r0, r4 mvncs r0, #0 bcs 1f GET_PCB(r6) ldr r6, [r6] #ifdef DIAGNOSTIC teq r6, #0x00000000 ldmfdeq sp!, {r4, r5, r6} beq .Lfusupcbfault #endif adr r4, .Lcasuwordfault str r4, [r6, #PCB_ONFAULT] -#if __ARM_ARCH >= 6 mov r5, #1 ldrex r4, [r0] cmp r4, r1 strexeq r5, r3, [r0] -#else - ldrt r4, [r0] - cmp r4, r1 - strteq r3, [r0] -#endif str r4, [r2] mov r0, #0 str r0, [r6, #PCB_ONFAULT] -#if __ARM_ARCH >= 6 mov r0, r5 -#endif 1: ldmfd sp!, {r4, r5, r6} RET EEND(casueword32) END(casueword) /* * Handle faults from casuword. Clean up and return -1. */ .Lcasuwordfault: mov r0, #0x00000000 str r0, [r6, #PCB_ONFAULT] mvn r0, #0 ldmfd sp!, {r4, r5, r6} RET /* * fueword(caddr_t uaddr, long *val); * Fetch an int from the user's address space. 
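 *
 * Editor's note: a hedged sketch of how casueword32() above is meant to
 * be driven.  It returns -1 on fault, 0 when the new value was stored,
 * and 1 when the word changed underneath us (or the exclusive store was
 * disturbed); on 1 the caller retries with the refreshed *oldvalp.  The
 * uword_add() helper is illustrative, not part of the kernel:
 *
 *	static int
 *	uword_add(volatile uint32_t *uaddr, uint32_t incr)
 *	{
 *		uint32_t old;
 *		int res;
 *
 *		if (fueword32(uaddr, (int32_t *)&old) == -1)
 *			return (EFAULT);
 *		do {
 *			res = casueword32(uaddr, old, &old, old + incr);
 *			if (res == -1)
 *				return (EFAULT);
 *		} while (res == 1);	// lost a race; 'old' was refreshed
 *		return (0);
 *	}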
*/ ENTRY(fueword) EENTRY_NP(fueword32) ldr r3, =(VM_MAXUSER_ADDRESS-3) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] ldrt r3, [r0] str r3, [r1] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET EEND(fueword32) END(fueword) /* * fusword(caddr_t uaddr); * Fetch a short from the user's address space. */ ENTRY(fusword) ldr r3, =(VM_MAXUSER_ADDRESS-1) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0], #1 ldrbt ip, [r0] #ifdef __ARMEB__ orr r0, ip, r3, asl #8 #else orr r0, r3, ip, asl #8 #endif mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] RET END(fusword) /* * fubyte(caddr_t uaddr); * Fetch a byte from the user's address space. */ ENTRY(fubyte) ldr r3, =VM_MAXUSER_ADDRESS cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0] mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] mov r0, r3 RET END(fubyte) /* * Handle faults from [fs]u*(). Clean up and return -1. */ .Lfusufault: mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] mvn r0, #0x00000000 RET #ifdef DIAGNOSTIC /* * Handle earlier faults from [fs]u*(), due to no pcb */ .Lfusupcbfault: mov r1, r0 adr r0, fusupcbfaulttext b _C_LABEL(panic) fusupcbfaulttext: .asciz "Yikes - no valid PCB during fusuxxx() addr=%08x\n" .align 2 #endif /* * suword(caddr_t uaddr, int x); * Store an int in the user's address space. */ ENTRY(suword) EENTRY_NP(suword32) ldr r3, =(VM_MAXUSER_ADDRESS-3) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET EEND(suword32) END(suword) /* * susword(caddr_t uaddr, short x); * Store a short in the user's address space. */ ENTRY(susword) ldr r3, =(VM_MAXUSER_ADDRESS-1) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] #ifdef __ARMEB__ mov ip, r1, lsr #8 strbt ip, [r0], #1 #else strbt r1, [r0], #1 mov r1, r1, lsr #8 #endif strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(susword) /* * subyte(caddr_t uaddr, char x); * Store a byte in the user's address space. */ ENTRY(subyte) ldr r3, =VM_MAXUSER_ADDRESS cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(subyte) Index: head/sys/arm/arm/genassym.c =================================================================== --- head/sys/arm/arm/genassym.c (revision 368140) +++ head/sys/arm/arm/genassym.c (revision 368141) @@ -1,179 +1,145 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For KERNVIRTADDR */ #include #include #include #include #include ASSYM(KERNBASE, KERNBASE); ASSYM(KERNVIRTADDR, KERNVIRTADDR); -#if __ARM_ARCH >= 6 ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL); -#endif ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); -#if __ARM_ARCH < 6 -ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr)); -#endif ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir)); -#if __ARM_ARCH < 6 -ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec)); -ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec)); -#endif ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4)); ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5)); ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6)); ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7)); ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8)); ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9)); ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10)); ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11)); ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp)); ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr)); ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc)); -#if __ARM_ARCH >= 6 ASSYM(PCB_TPIDRURW, offsetof(struct pcb, pcb_regs.sf_tpidrurw)); -#endif ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(M_LEN, offsetof(struct mbuf, m_len)); ASSYM(M_DATA, offsetof(struct mbuf, m_data)); ASSYM(M_NEXT, offsetof(struct mbuf, m_next)); ASSYM(IP_SRC, offsetof(struct ip, ip_src)); ASSYM(IP_DST, offsetof(struct ip, ip_dst)); -#if __ARM_ARCH < 6 -ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch)); -ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range)); -ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all)); -ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all)); -ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE)); -#endif ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_MD, offsetof(struct thread, td_md)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); -#if __ARM_ARCH < 6 -ASSYM(MD_TP, offsetof(struct mdthread, md_tp)); 
-ASSYM(MD_RAS_START, offsetof(struct mdthread, md_ras_start)); -ASSYM(MD_RAS_END, offsetof(struct mdthread, md_ras_end)); -#endif ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr)); ASSYM(TF_R0, offsetof(struct trapframe, tf_r0)); ASSYM(TF_R1, offsetof(struct trapframe, tf_r1)); ASSYM(TF_PC, offsetof(struct trapframe, tf_pc)); ASSYM(P_PID, offsetof(struct proc, p_pid)); ASSYM(P_FLAG, offsetof(struct proc, p_flag)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); -#if __ARM_ARCH < 6 -ASSYM(ARM_TP_ADDRESS, ARM_TP_ADDRESS); -ASSYM(ARM_RAS_START, ARM_RAS_START); -ASSYM(ARM_RAS_END, ARM_RAS_END); -#endif - #ifdef VFP ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate)); #endif -#if __ARM_ARCH >= 6 ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); ASSYM(PC_BP_HARDEN_KIND, offsetof(struct pcpu, pc_bp_harden_kind)); ASSYM(PCPU_BP_HARDEN_KIND_NONE, PCPU_BP_HARDEN_KIND_NONE); ASSYM(PCPU_BP_HARDEN_KIND_BPIALL, PCPU_BP_HARDEN_KIND_BPIALL); ASSYM(PCPU_BP_HARDEN_KIND_ICIALLU, PCPU_BP_HARDEN_KIND_ICIALLU); -#endif ASSYM(PAGE_SIZE, PAGE_SIZE); -#if __ARM_ARCH < 6 -ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL); -#endif #ifdef PMAP_INCLUDE_PTE_SYNC ASSYM(PMAP_INCLUDE_PTE_SYNC, 1); #endif ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MAXCPU, MAXCPU); ASSYM(_NCPUWORDS, _NCPUWORDS); ASSYM(NIRQ, NIRQ); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid)); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size)); ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask)); ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size)); ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask)); /* * Emit the LOCORE_MAP_MB option as a #define only if the option was set. */ #include "opt_locore.h" #ifdef LOCORE_MAP_MB ASSYM(LOCORE_MAP_MB, LOCORE_MAP_MB); #endif Index: head/sys/arm/arm/machdep.c =================================================================== --- head/sys/arm/arm/machdep.c (revision 368140) +++ head/sys/arm/arm/machdep.c (revision 368141) @@ -1,1308 +1,955 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Machine dependent functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \ defined(COMPAT_FREEBSD9) #error FreeBSD/arm doesn't provide compatibility with releases prior to 10 #endif -#if __ARM_ARCH >= 6 && !defined(INTRNG) -#error armv6 requires INTRNG -#endif #ifndef _ARM_ARCH_5E #error FreeBSD requires ARMv5 or later #endif struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; /* The address at which the kernel was loaded. Set early in initarm(). */ vm_paddr_t arm_physmem_kernaddr; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT vm_paddr_t pmap_pa; -#if __ARM_ARCH >= 6 vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; -#else -/* - * This is the number of L2 page tables required for covering max - * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, - * stacks etc.), uprounded to be divisible by 4. - */ -#define KERNEL_PT_MAX 78 -static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; -struct pv_addr systempage; -static struct pv_addr msgbufpv; -struct pv_addr irqstack; -struct pv_addr undstack; -struct pv_addr abtstack; -static struct pv_addr kernelstack; -#endif /* __ARM_ARCH >= 6 */ #endif /* FDT */ #ifdef PLATFORM static delay_func *delay_impl; static void *delay_arg; #endif struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. 
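 *
 * Editor's note: for each enabled vector this copies a pair of words that
 * page0/page0_data (locore) lay out in parallel, e.g. for the IRQ slot
 * (index shown for illustration):
 *
 *	vectors[6]      = page0[6];       // insn: "ldr pc, .Lirq_entry"
 *	vectors_data[6] = page0_data[6];  // word: address of irq_entry
 *
 * The "ldr pc" instruction uses pc-relative addressing, so the insn and
 * its data word must keep the same distance in the destination page as
 * they have in page0 -- which is why vectors_data is computed from
 * (page0_data - page0) rather than a fixed offset.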
*/ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; -#if __ARM_ARCH < 6 - if (va == ARM_VECTORS_HIGH) { - /* - * Enable high vectors in the system control reg (SCTLR). - * - * Assume the MD caller knows what it's doing here, and really - * does want the vector page relocated. - * - * Note: This has to be done here (and not just in - * cpu_setup()) because the vector page needs to be - * accessible *before* cpu_startup() is called. - * Think ddb(9) ... - */ - cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); - } -#endif } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; -#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE) - vm_page_t m; -#endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. */ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_free_count()), (uintmax_t)arm32_ptob(vm_free_count()) / mbyte); if (bootverbose) { physmem_print_tables(); devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(kernel_pmap, pcb); -#if __ARM_ARCH < 6 - vector_page_setprot(VM_PROT_READ); - pmap_postinit(); -#ifdef ARM_CACHE_LOCK_ENABLE - pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); - arm_lock_cache_line(ARM_TP_ADDRESS); -#else - m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); - pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); -#endif - *(uint32_t *)ARM_RAS_START = 0; - *(uint32_t *)ARM_RAS_END = 0xffffffff; -#endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len); } /* Get current clock frequency for the given cpu id. 
*/ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { -#if __ARM_ARCH >= 6 struct pcpu *pc; pc = pcpu_find(cpu_id); if (pc == NULL || rate == NULL) return (EINVAL); if (pc->pc_clock == 0) return (EOPNOTSUPP); *rate = pc->pc_clock; return (0); -#else - return (ENXIO); -#endif } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) cpu_sleep(0); if (!busy) cpu_activeclock(); spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } void cpu_initclocks(void) { #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif } #ifdef PLATFORM void arm_set_delay(delay_func *impl, void *arg) { KASSERT(impl != NULL, ("No DELAY implementation")); delay_impl = impl; delay_arg = arg; } void DELAY(int usec) { TSENTER(); delay_impl(usec, delay_arg); TSEXIT(); } #endif void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; critical_enter(); } else td->td_md.md_spinlock_count++; } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) { critical_exit(); restore_interrupts(cspr); } } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } #ifdef VFP /* * Get machine VFP context. */ void get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *pcb; pcb = td->td_pcb; if (td == curthread) { critical_enter(); vfp_store(&pcb->pcb_vfpstate, false); critical_exit(); } else MPASS(TD_IS_SUSPENDED(td)); memcpy(vfp->mcv_reg, pcb->pcb_vfpstate.reg, sizeof(vfp->mcv_reg)); vfp->mcv_fpscr = pcb->pcb_vfpstate.fpscr; } /* * Set machine VFP context. */ void set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *pcb; pcb = td->td_pcb; if (td == curthread) { critical_enter(); vfp_discard(td); critical_exit(); } else MPASS(TD_IS_SUSPENDED(td)); memcpy(pcb->pcb_vfpstate.reg, vfp->mcv_reg, sizeof(pcb->pcb_vfpstate.reg)); pcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr; } #endif int arm_get_vfpstate(struct thread *td, void *args) { int rv; struct arm_get_vfpstate_args ua; mcontext_vfp_t mcontext_vfp; rv = copyin(args, &ua, sizeof(ua)); if (rv != 0) return (rv); if (ua.mc_vfp_size != sizeof(mcontext_vfp_t)) return (EINVAL); #ifdef VFP get_vfpcontext(td, &mcontext_vfp); #else bzero(&mcontext_vfp, sizeof(mcontext_vfp)); #endif rv = copyout(&mcontext_vfp, ua.mc_vfp, sizeof(mcontext_vfp)); if (rv != 0) return (rv); return (0); } /* * Get machine context. 
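 *
 * Editor's note: the clear_ret flag below is GET_MC_CLEAR_RET when the
 * saved context should later resume as if a system call had just returned
 * success -- R0 forced to 0 and the PSR carry bit (the error flag in the
 * ARM syscall return convention) cleared.  Sketch of a getcontext(2)-style
 * caller (MI code, shown here only for orientation):
 *
 *	error = get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
 *
 * sendsig() below instead passes 0, preserving the interrupted frame
 * exactly as it was.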
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; mcp->mc_vfp_size = 0; mcp->mc_vfp_ptr = NULL; memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare)); return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { mcontext_vfp_t mc_vfp, *vfp; struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; int spsr; /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = gr[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); #ifdef WITNESS if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) { printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n", td->td_proc->p_comm, __func__, mcp->mc_vfp_size, mcp->mc_vfp_size); } else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) { printf("%s: %s: c_vfp_size != 0 but mc_vfp_ptr == NULL\n", td->td_proc->p_comm, __func__); } #endif if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) { if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0) return (EFAULT); vfp = &mc_vfp; } else { vfp = NULL; } tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; #ifdef VFP if (vfp != NULL) set_vfpcontext(td, vfp); #endif return (0); } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. 
*/ bzero(&frame, sizeof(frame)); get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); #ifdef VFP get_vfpcontext(td, &frame.sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp; #else frame.sf_uc.uc_mcontext.mc_vfp_size = 0; frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL; #endif frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack = td->td_sigstk; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ? (onstack ? SS_ONSTACK : 0) : SS_DISABLE; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. */ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* Restore register context. */ error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
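 *
 * Editor's note: a sketch of that kdb_trap()-style caller (the local pcb
 * variable is illustrative):
 *
 *	struct pcb pcb;
 *
 *	makectx(tf, &pcb);
 *	// unwind from pcb.pcb_regs.sf_pc / sf_r11 instead of from here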
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } void pcpu0_init(void) { -#if __ARM_ARCH >= 6 set_curthread(&thread0); -#endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_kstack_pages = kstack_pages; thread0.td_pcb = (struct pcb *)(thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } -#if __ARM_ARCH >= 6 void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } -#else -void -set_stackptrs(int cpu) -{ - set_stackptr(PSR_IRQ32_MODE, - irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); - set_stackptr(PSR_ABT32_MODE, - abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); - set_stackptr(PSR_UND32_MODE, - undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); -} -#endif - static void arm_kdb_init(void) { kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } #ifdef FDT -#if __ARM_ARCH < 6 void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; - struct pv_addr kernel_l1pt; - struct pv_addr dpcpu; - vm_offset_t dtbp, freemempos, l2_start, lastaddr; - uint64_t memsize; - uint32_t l2size; - char *env; - void *kmdp; - u_int l1pagetable; - int i, j, err_devmap, mem_regions_sz; - - lastaddr = parse_boot_param(abp); - arm_physmem_kernaddr = abp->abp_physaddr; - - memsize = 0; - - cpuinfo_init(); - set_cpufuncs(); - - /* - * Find the dtb passed in by the boot loader. - */ - kmdp = preload_search_by_type("elf kernel"); - if (kmdp != NULL) - dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); - else - dtbp = (vm_offset_t)NULL; - -#if defined(FDT_DTB_STATIC) - /* - * In case the device tree blob was not retrieved (from metadata) try - * to use the statically embedded one. - */ - if (dtbp == (vm_offset_t)NULL) - dtbp = (vm_offset_t)&fdt_static_dtb; -#endif - - if (OF_install(OFW_FDT, 0) == FALSE) - panic("Cannot install FDT"); - - if (OF_init((void *)dtbp) != 0) - panic("OF_init failed with the found device tree"); - - /* Grab physical memory regions information from device tree. */ - if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) - panic("Cannot get physical memory regions"); - physmem_hardware_regions(mem_regions, mem_regions_sz); - - /* Grab reserved memory regions information from device tree. 
*/ - if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) - physmem_exclude_regions(mem_regions, mem_regions_sz, - EXFLAG_NODUMP | EXFLAG_NOALLOC); - - /* Platform-specific initialisation */ - platform_probe_and_attach(); - - pcpu0_init(); - - /* Do basic tuning, hz etc */ - init_param1(); - - /* Calculate number of L2 tables needed for mapping vm_page_array */ - l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); - l2size = (l2size >> L1_S_SHIFT) + 1; - - /* - * Add one table for end of kernel map, one for stacks, msgbuf and - * L1 and L2 tables map, one for vectors map and two for - * l2 structures from pmap_bootstrap. - */ - l2size += 5; - - /* Make it divisible by 4 */ - l2size = (l2size + 3) & ~3; - - freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; - - /* Define a macro to simplify memory allocation */ -#define valloc_pages(var, np) \ - alloc_pages((var).pv_va, (np)); \ - (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); - -#define alloc_pages(var, np) \ - (var) = freemempos; \ - freemempos += (np * PAGE_SIZE); \ - memset((char *)(var), 0, ((np) * PAGE_SIZE)); - - while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) - freemempos += PAGE_SIZE; - valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); - - for (i = 0, j = 0; i < l2size; ++i) { - if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { - valloc_pages(kernel_pt_table[i], - L2_TABLE_SIZE / PAGE_SIZE); - j = i; - } else { - kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + - L2_TABLE_SIZE_REAL * (i - j); - kernel_pt_table[i].pv_pa = - kernel_pt_table[i].pv_va - KERNVIRTADDR + - abp->abp_physaddr; - } - } - /* - * Allocate a page for the system page mapped to 0x00000000 - * or 0xffff0000. This page will just contain the system vectors - * and can be shared by all processes. - */ - valloc_pages(systempage, 1); - - /* Allocate dynamic per-cpu area. */ - valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); - dpcpu_init((void *)dpcpu.pv_va, 0); - - /* Allocate stacks for all modes */ - valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); - valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); - valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); - valloc_pages(kernelstack, kstack_pages); - valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); - - /* - * Now we start construction of the L1 page table - * We start by mapping the L2 page tables into the L1. - * This means that we can replace L1 mappings later on if necessary - */ - l1pagetable = kernel_l1pt.pv_va; - - /* - * Try to map as much as possible of kernel text and data using - * 1MB section mapping and for the rest of initial kernel address - * space use L2 coarse tables. 
- * - * Link L2 tables for mapping remainder of kernel (modulo 1MB) - * and kernel structures - */ - l2_start = lastaddr & ~(L1_S_OFFSET); - for (i = 0 ; i < l2size - 1; i++) - pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, - &kernel_pt_table[i]); - - pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; - - /* Map kernel code and data */ - pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, - (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - - /* Map L1 directory and allocated L2 page tables */ - pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, - L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); - - pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, - kernel_pt_table[0].pv_pa, - L2_TABLE_SIZE_REAL * l2size, - VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); - - /* Map allocated DPCPU, stacks and msgbuf */ - pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, - freemempos - dpcpu.pv_va, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - - /* Link and map the vector page */ - pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, - &kernel_pt_table[l2size - 1]); - pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, - VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); - - /* Establish static device mappings. */ - err_devmap = platform_devmap_init(); - devmap_bootstrap(l1pagetable, NULL); - vm_max_kernel_address = platform_lastaddr(); - - cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); - pmap_pa = kernel_l1pt.pv_pa; - cpu_setttb(kernel_l1pt.pv_pa); - cpu_tlb_flushID(); - cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); - - /* - * Now that proper page tables are installed, call cpu_setup() to enable - * instruction and data caches and other chip-specific features. - */ - cpu_setup(); - - /* - * Only after the SOC registers block is mapped we can perform device - * tree fixups, as they may attempt to read parameters from hardware. - */ - OF_interpret("perform-fixup", 0); - - platform_gpio_init(); - - cninit(); - - debugf("initarm: console initialized\n"); - debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); - debugf(" boothowto = 0x%08x\n", boothowto); - debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); - arm_print_kenv(); - - /* - * Dump the boot metadata. We have to wait for cninit() since console - * output is required. If it's grossly incorrect the kernel will never - * make it this far. - */ - if (getenv_is_true("debug.dump_modinfo_at_boot")) - preload_dump(); - - env = kern_getenv("kernelname"); - if (env != NULL) { - strlcpy(kernelname, env, sizeof(kernelname)); - freeenv(env); - } - - if (err_devmap != 0) - printf("WARNING: could not fully configure devmap, error=%d\n", - err_devmap); - - platform_late_init(); - - /* - * Pages were allocated during the secondary bootstrap for the - * stacks for different CPU modes. - * We must now set the r13 registers in the different CPU modes to - * point to these stacks. - * Since the ARM stacks use STMFD etc. we must set r13 to the top end - * of the stack memory. - */ - cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); - - set_stackptrs(0); - - /* - * We must now clean the cache again.... - * Cleaning may be done by reading new data to displace any - * dirty data in the cache. This will have happened in cpu_setttb() - * but since we are boot strapping the addresses used for the read - * may have just been remapped and thus the cache could be out - * of sync. A re-clean after the switch will cure this. 
- * After booting there are no gross relocations of the kernel thus - * this problem will not occur after initarm(). - */ - cpu_idcache_wbinv_all(); - - undefined_init(); - - init_proc0(kernelstack.pv_va); - - arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); - pmap_bootstrap(freemempos, &kernel_l1pt); - msgbufp = (void *)msgbufpv.pv_va; - msgbufinit(msgbufp, msgbufsize); - mutex_init(); - - /* - * Exclude the kernel (and all the things we allocated which immediately - * follow the kernel) from the VM allocation pool but not from crash - * dumps. virtual_avail is a global variable which tracks the kva we've - * "allocated" while setting up pmaps. - * - * Prepare the list of physical memory available to the vm subsystem. - */ - physmem_exclude_region(abp->abp_physaddr, - (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); - physmem_init_kernel_globals(); - - init_param2(physmem); - dbg_monitor_init(); - arm_kdb_init(); - - return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - - sizeof(struct pcb))); -} -#else /* __ARM_ARCH < 6 */ -void * -initarm(struct arm_boot_params *abp) -{ - struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; char *env; void *kmdp; int err_devmap, mem_regions_sz; phandle_t root; char dts_version[255]; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #if defined(LINUX_BOOT_ABI) arm_parse_fdt_bootargs(); #endif #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,NULL) != 0) panic("Cannot get physical memory regions"); } physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * If EARLY_PRINTF support is enabled, we need to re-establish the * mapping after pmap_bootstrap_prepare() switches to new page tables. * Note that we can only do the remapping if the VA is outside the * kernel, now that we have real virtual (not VA=PA) mappings in effect. * Early printf does not work between the time pmap_set_tex() does * cp15_prrr_set() and this code remaps the VA. 
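 *
 * Editor's note: a hedged example of wiring this up in a kernel config;
 * the addresses are board-specific and purely illustrative, and the
 * platform code must also provide the early console output routine:
 *
 *	options 	EARLY_PRINTF
 *	options 	SOCDEV_PA=0x01c28000	# UART physical address
 *	options 	SOCDEV_VA=0x01c28000	# pre-MMU VA for the same UART
 *
 * With SOCDEV_VA below KERNBASE, the block below re-creates the device
 * mapping after the switch to the real kernel page tables.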
*/ #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024, VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE); #endif /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. */ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages); /* Allocate message buffer. */ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); /* * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(), * undo it now that the normal console printf works. */ #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE pmap_kremove(SOCDEV_VA); #endif debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); arm_print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); root = OF_finddevice("/"); if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) { if (strcmp(LINUX_DTS_VERSION, dts_version) != 0) printf("WARNING: DTB version is %s while kernel expects %s, " "please update the DTB in the ESP\n", dts_version, LINUX_DTS_VERSION); } else { printf("WARNING: Cannot find freebsd,dts-version property, " "cannot check DTB compliance\n"); } /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. 
* After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); dbg_monitor_init(); arm_kdb_init(); /* Apply possible BP hardening. */ cpuinfo_init_bp_hardening(); return ((void *)STACKALIGN(thread0.td_pcb)); } - -#endif /* __ARM_ARCH < 6 */ #endif /* FDT */ Index: head/sys/arm/arm/machdep_intr.c =================================================================== --- head/sys/arm/arm/machdep_intr.c (revision 368140) +++ head/sys/arm/arm/machdep_intr.c (revision 368141) @@ -1,232 +1,228 @@ /*- * Copyright (c) 2015-2016 Svatopluk Kraus * Copyright (c) 2015-2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#ifdef INTRNG #include "pic_if.h" #ifdef SMP #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t * ii_handler; void * ii_handler_arg; intr_ipi_send_t * ii_send; void * ii_send_arg; char ii_name[INTR_IPI_NAMELEN]; u_long * ii_count; }; static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; #endif -#endif /* * arm_irq_memory_barrier() * * Ensure all writes to device memory have reached devices before proceeding. * * This is intended to be called from the post-filter and post-thread routines * of an interrupt controller implementation. 
A peripheral device driver should * use bus_space_barrier() if it needs to ensure a write has reached the * hardware for some reason other than clearing interrupt conditions. * * The need for this function arises from the ARM weak memory ordering model. * Writes to locations mapped with the Device attribute bypass any caches, but * are buffered. Multiple writes to the same device will be observed by that * device in the order issued by the cpu. Writes to different devices may * appear at those devices in a different order than issued by the cpu. That * is, if the cpu writes to device A then device B, the write to device B could * complete before the write to device A. * * Consider a typical device interrupt handler which services the interrupt and * writes to a device status-acknowledge register to clear the interrupt before * returning. That write is posted to the L2 controller which "immediately" * places it in a store buffer and automatically drains that buffer. This can * be less immediate than you'd think... There may be no free slots in the store * buffers, so an existing buffer has to be drained first to make room. The * target bus may be busy with other traffic (such as DMA for various devices), * delaying the drain of the store buffer for some indeterminate time. While * all this delay is happening, execution proceeds on the CPU, unwinding its way * out of the interrupt call stack to the point where the interrupt driver code * is ready to EOI and unmask the interrupt. The interrupt controller may be * accessed via a faster bus than the hardware whose handler just ran; the write * to unmask and EOI the interrupt may complete quickly while the device write * to ack and clear the interrupt source is still lingering in a store buffer * waiting for access to a slower bus. With the interrupt unmasked at the * interrupt controller but still active at the device, as soon as interrupts * are enabled on the core the device re-interrupts immediately: now you've got * a spurious interrupt on your hands. * * The right way to fix this problem is for every device driver to use the * proper bus_space_barrier() calls in its interrupt handler. For ARM a single * barrier call at the end of the handler would work. This would have to be * done to every driver in the system, not just arm-specific drivers. * * Another potential fix is to map all device memory as Strongly-Ordered rather * than Device memory, which takes the store buffers out of the picture. This * has a pretty big impact on overall system performance, because each strongly * ordered memory access causes all L2 store buffers to be drained. * * A compromise solution is to have the interrupt controller implementation call * this function to establish a barrier between writes to the interrupt-source * device and writes to the interrupt controller device. * * This takes the interrupt number as an argument, and currently doesn't use it. * The plan is that maybe some day there is a way to flag certain interrupts as * "memory barrier safe" and we can avoid this overhead with them. 
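 *
 * Editor's note: a hedged sketch of the per-driver bus_space_barrier()
 * alternative described above.  The device, register and softc names are
 * hypothetical; bus_space_barrier() and BUS_SPACE_BARRIER_WRITE are the
 * real bus_space(9) interface:
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		// ... service the device ...
 *		bus_space_write_4(sc->sc_bst, sc->sc_bsh, MYDEV_INTACK, 1);
 *		// Make sure the ack reaches the device before we return
 *		// and the interrupt controller is EOId/unmasked.
 *		bus_space_barrier(sc->sc_bst, sc->sc_bsh, MYDEV_INTACK, 4,
 *		    BUS_SPACE_BARRIER_WRITE);
 *		return (FILTER_HANDLED);
 *	}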
*/ void arm_irq_memory_barrier(uintptr_t irq) { dsb(); cpu_l2cache_drain_writebuf(); } -#ifdef INTRNG #ifdef SMP static inline struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } void intr_ipi_dispatch(u_int ipi, struct trapframe *tf) { void *arg; struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); /* * Supply ipi filter with trapframe argument * if none is registered. */ arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf; ii->ii_handler(arg); } void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); ii->ii_send(ii->ii_send_arg, cpus, ipi); } void intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *h_arg, intr_ipi_send_t *send, void *s_arg) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); KASSERT(send != NULL, ("%s: ipi %u no sender", __func__, ipi)); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = h_arg; ii->ii_send = send; ii->ii_send_arg = s_arg; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); } /* * Send IPI thru interrupt controller. */ static void pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) { KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. */ int intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { int error; struct intr_irqsrc *isrc; KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); if (error != 0) return (error); isrc->isrc_handlers++; intr_ipi_setup(ipi, name, hand, arg, pic_ipi_send, isrc); return (0); } -#endif #endif Index: head/sys/arm/arm/machdep_kdb.c =================================================================== --- head/sys/arm/arm/machdep_kdb.c (revision 368140) +++ head/sys/arm/arm/machdep_kdb.c (revision 368141) @@ -1,147 +1,144 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #ifdef DDB #include -#if __ARM_ARCH >= 6 - DB_SHOW_COMMAND(cp15, db_show_cp15) { u_int reg; reg = cp15_midr_get(); db_printf("Cpu ID: 0x%08x\n", reg); reg = cp15_ctr_get(); db_printf("Current Cache Lvl ID: 0x%08x\n",reg); reg = cp15_sctlr_get(); db_printf("Ctrl: 0x%08x\n",reg); reg = cp15_actlr_get(); db_printf("Aux Ctrl: 0x%08x\n",reg); reg = cp15_id_pfr0_get(); db_printf("Processor Feat 0: 0x%08x\n", reg); reg = cp15_id_pfr1_get(); db_printf("Processor Feat 1: 0x%08x\n", reg); reg = cp15_id_dfr0_get(); db_printf("Debug Feat 0: 0x%08x\n", reg); reg = cp15_id_afr0_get(); db_printf("Auxiliary Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr0_get(); db_printf("Memory Model Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr1_get(); db_printf("Memory Model Feat 1: 0x%08x\n", reg); reg = cp15_id_mmfr2_get(); db_printf("Memory Model Feat 2: 0x%08x\n", reg); reg = cp15_id_mmfr3_get(); db_printf("Memory Model Feat 3: 0x%08x\n", reg); reg = cp15_ttbr_get(); db_printf("TTB0: 0x%08x\n", reg); } DB_SHOW_COMMAND(vtop, db_show_vtop) { u_int reg; if (have_addr) { cp15_ats1cpr_set(addr); reg = cp15_par_get(); db_printf("Physical address reg: 0x%08x\n",reg); } else db_printf("show vtop \n"); } -#endif /* __ARM_ARCH >= 6 */ #endif /* DDB */ int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(&tf->tf_r0, regs->r, sizeof(regs->r)); regs->r_sp = tf->tf_usr_sp; regs->r_lr = tf->tf_usr_lr; regs->r_pc = tf->tf_pc; regs->r_cpsr = tf->tf_spsr; return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { bzero(regs, sizeof(*regs)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(regs->r, &tf->tf_r0, sizeof(regs->r)); tf->tf_usr_sp = regs->r_sp; tf->tf_usr_lr = regs->r_lr; tf->tf_pc = regs->r_pc; tf->tf_spsr &= ~PSR_FLAGS; tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS; return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { bzero(regs, sizeof(*regs)); return (0); } int set_dbregs(struct thread *td, struct dbreg *regs) { return (0); } Index: head/sys/arm/arm/mem.c =================================================================== --- head/sys/arm/arm/mem.c (revision 368140) +++ head/sys/arm/arm/mem.c (revision 368141) @@ -1,181 +1,179 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1988 University of Utah. * Copyright (c) 1982, 1986, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and code derived from software contributed to * Berkeley by William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah $Hdr: mem.c 1.13 89/10/08$ * from: @(#)mem.c 7.2 (Berkeley) 5/9/91 */ #include __FBSDID("$FreeBSD$"); /* * Memory special file */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Used in /dev/mem drivers and elsewhere */ MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors"); struct mem_range_softc mem_range_softc; static struct sx tmppt_lock; SX_SYSINIT(tmppt, &tmppt_lock, "mem4map"); /* ARGSUSED */ int memrw(struct cdev *dev, struct uio *uio, int flags) { int o; u_int c = 0, v; struct iovec *iov; int error = 0; vm_offset_t addr, eaddr; while (uio->uio_resid > 0 && error == 0) { iov = uio->uio_iov; if (iov->iov_len == 0) { uio->uio_iov++; uio->uio_iovcnt--; if (uio->uio_iovcnt < 0) panic("memrw"); continue; } if (dev2unit(dev) == CDEV_MINOR_MEM) { int i; int address_valid = 0; v = uio->uio_offset; v &= ~PAGE_MASK; for (i = 0; dump_avail[i] || dump_avail[i + 1]; i += 2) { if (v >= dump_avail[i] && v < dump_avail[i + 1]) { address_valid = 1; break; } } if (!address_valid) return (EINVAL); sx_xlock(&tmppt_lock); pmap_kenter((vm_offset_t)_tmppt, v); -#if __ARM_ARCH >= 6 pmap_tlb_flush(kernel_pmap, (vm_offset_t)_tmppt); -#endif o = (int)uio->uio_offset & PAGE_MASK; c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK)); c = min(c, (u_int)(PAGE_SIZE - o)); c = min(c, (u_int)iov->iov_len); error = uiomove((caddr_t)&_tmppt[o], (int)c, uio); pmap_qremove((vm_offset_t)_tmppt, 1); sx_xunlock(&tmppt_lock); continue; } else if (dev2unit(dev) == CDEV_MINOR_KMEM) { c = iov->iov_len; /* * Make sure that all of the pages are currently * resident so that we don't create any zero-fill * pages. */ addr = trunc_page(uio->uio_offset); eaddr = round_page(uio->uio_offset + c); for (; addr < eaddr; addr += PAGE_SIZE) if (pmap_extract(kernel_pmap, addr) == 0) return (EFAULT); if (!kernacc((caddr_t)(int)uio->uio_offset, c, uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE)) return (EFAULT); error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio); continue; } /* else panic! 
*/ } return (error); } /* * Allow user processes to mmap some memory sections * instead of going through read/write. */ /* ARGSUSED */ int memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int prot __unused, vm_memattr_t *memattr __unused) { if (dev2unit(dev) == CDEV_MINOR_MEM) { *paddr = offset; return (0); } return (-1); } int memioctl_md(struct cdev *dev __unused, u_long cmd __unused, caddr_t data __unused, int flags __unused, struct thread *td __unused) { return (ENOTTY); } Index: head/sys/arm/arm/minidump_machdep.c =================================================================== --- head/sys/arm/arm/minidump_machdep.c (revision 368140) +++ head/sys/arm/arm/minidump_machdep.c (revision 368141) @@ -1,343 +1,339 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/i386/minidump_machdep.c,v 1.6 2008/08/17 23:27:27 */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #ifdef SW_WATCHDOG #include #endif #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes.
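 *
 * blk_write() below accumulates partial fragments in dump_va/fragsz
 * and blk_flush() pushes a completed fragment to the dump device. A
 * minimal sketch of the same accumulate-then-flush pattern (buf, used
 * and append() are illustrative names, not the code below):
 *
 *	static char buf[MAXFRAG];
 *	static size_t used;
 *
 *	static int
 *	append(struct dumperinfo *di, const char *p, size_t n)
 *	{
 *		size_t take;
 *		int error;
 *
 *		while (n > 0) {
 *			take = sizeof(buf) - used;
 *			if (take > n)
 *				take = n;
 *			memcpy(buf + used, p, take);
 *			used += take; p += take; n -= take;
 *			if (used == sizeof(buf)) {
 *				error = dump_append(di, buf, 0, used);
 *				if (error != 0)
 *					return (error);
 *				used = 0;
 *			}
 *		}
 *		return (0);
 *	}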
*/ static size_t fragsz; static void *dump_va; static uint64_t counter, progress; static int is_dumpable(vm_paddr_t pa) { int i; for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) return (1); } return (0); } #define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8) static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, i, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if (ptr != NULL && pa != 0) { printf("can't have both va and pa!\n"); return (EINVAL); } if (pa != 0) { if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if ((pa & PAGE_MASK) != 0) { printf("address not page aligned\n"); return (EINVAL); } } if (ptr != NULL) { /* Flush any pre-existing pa pages before a virtual dump. */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; counter += len; progress -= len; if (counter >> 22) { printf(" %lld", PG2MB(progress >> PAGE_SHIFT)); counter &= (1<<22) - 1; } #ifdef SW_WATCHDOG wdog_kern_pat(WD_LASTVAL); #endif if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { for (i = 0; i < len; i += PAGE_SIZE) dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT); fragsz += len; pa += len; sz -= len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } /* A buffer for general use. Its size must be at least one page. */ static char dumpbuf[PAGE_SIZE]; CTASSERT(sizeof(dumpbuf) % sizeof(pt2_entry_t) == 0); int minidumpsys(struct dumperinfo *di) { struct minidumphdr mdhdr; uint64_t dumpsize; uint32_t ptesize; uint32_t pa, prev_pa = 0, count = 0; vm_offset_t va; int error; char *addr; /* * Flush caches. Note that in the SMP case this operates only on the * current CPU's L1 cache. Before we reach this point, code in either * the system shutdown or kernel debugger has called stop_cpus() to stop * all cores other than this one. Part of the ARM handling of * stop_cpus() is to call wbinv_all() on that core's local L1 cache. So * by the time we get here, all that remains is to flush the L1 for the * current CPU, then the L2. */ dcache_wbinv_poc_all(); counter = 0; /* Walk page table pages, set bits in vm_page_dump */ ptesize = 0; for (va = KERNBASE; va < kernel_vm_end; va += PAGE_SIZE) { pa = pmap_dump_kextract(va, NULL); if (pa != 0 && is_dumpable(pa)) dump_add_page(pa); ptesize += sizeof(pt2_entry_t); } /* Calculate dump size.
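 *
 * The accounting below adds up, in order: the PTE copies (ptesize),
 * the message buffer, the dump_avail array, the page bitmap, one
 * PAGE_SIZE per page still marked in the bitmap, and a final
 * PAGE_SIZE for the minidump header itself. Schematically
 * ("dumpable pages" stands in for the bitmap walk):
 *
 *	dumpsize = ptesize
 *	    + round_page(msgbufp->msg_size)
 *	    + round_page(sizeof(dump_avail))
 *	    + round_page(BITSET_SIZE(vm_page_dump_pages))
 *	    + (dumpable pages) * PAGE_SIZE
 *	    + PAGE_SIZE;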
*/ dumpsize = ptesize; dumpsize += round_page(msgbufp->msg_size); dumpsize += round_page(sizeof(dump_avail)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); } dumpsize += PAGE_SIZE; progress = dumpsize; /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = msgbufp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.ptesize = ptesize; mdhdr.kernbase = KERNBASE; mdhdr.arch = __ARM_ARCH; -#if __ARM_ARCH >= 6 mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V6; -#else - mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V4; -#endif mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Physical memory: %u MB\n", ptoa((uintmax_t)physmem) / 1048576); printf("Dumping %llu MB:", (long long)dumpsize >> 20); /* Dump my header */ bzero(dumpbuf, sizeof(dumpbuf)); bcopy(&mdhdr, dumpbuf, sizeof(mdhdr)); error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size)); if (error) goto fail; /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(dumpbuf), "Large dump_avail not handled"); bzero(dumpbuf, sizeof(dumpbuf)); memcpy(dumpbuf, dump_avail, sizeof(dump_avail)); error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ addr = dumpbuf; for (va = KERNBASE; va < kernel_vm_end; va += PAGE_SIZE) { pmap_dump_kextract(va, (pt2_entry_t *)addr); addr += sizeof(pt2_entry_t); if (addr == dumpbuf + sizeof(dumpbuf)) { error = blk_write(di, dumpbuf, 0, sizeof(dumpbuf)); if (error != 0) goto fail; addr = dumpbuf; } } if (addr != dumpbuf) { error = blk_write(di, dumpbuf, 0, addr - dumpbuf); if (error != 0) goto fail; } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { if (!count) { prev_pa = pa; count++; } else { if (pa == (prev_pa + count * PAGE_SIZE)) count++; else { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pa = pa; } } } if (count) { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pa = 0; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } Index: head/sys/arm/arm/mp_machdep.c =================================================================== --- head/sys/arm/arm/mp_machdep.c (revision 368140) +++ head/sys/arm/arm/mp_machdep.c (revision 368141) @@ -1,395 +1,395 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 Semihalf. * All rights reserved. 
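 *
 * Aside on the minidump write-out that closes the previous file: the
 * VM_PAGE_DUMP_FOREACH loop batches physically contiguous pages into
 * a single blk_write() per run. The same pattern in isolation, using
 * the names from that code (error handling omitted):
 *
 *	if (count == 0) {
 *		prev_pa = pa;
 *		count = 1;
 *	} else if (pa == prev_pa + count * PAGE_SIZE) {
 *		count++;
 *	} else {
 *		blk_write(di, NULL, prev_pa, count * PAGE_SIZE);
 *		prev_pa = pa;
 *		count = 1;
 *	}
 *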
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef CPU_MV_PJ4B #include #endif /* used to hold the APs until we are ready to release them */ struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; /* # of application processors */ volatile int mp_naps; /* Set to 1 once we're ready to let the APs out of the pen. */ volatile int aps_ready = 0; void set_stackptrs(int cpu); /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; /* Determine if we are running on an MP machine */ int cpu_mp_probe(void) { KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset")); CPU_SETOF(0, &all_cpus); return (mp_ncpus > 1); } /* Start Application Processors via platform-specific function */ static int check_ap(void) { uint32_t ms; for (ms = 0; ms < 2000; ++ms) { if ((mp_naps + 1) == mp_ncpus) return (0); /* success */ else DELAY(1000); } return (-2); } /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { int error, i; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); /* Reserve memory for application processors */ for (i = 0; i < (mp_ncpus - 1); i++) dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO); dcache_wbinv_poc_all(); /* Initialize boot code and start up processors */ platform_mp_start_ap(); /* Check if APs started properly */ error = check_ap(); if (error) printf("WARNING: Some APs failed to start\n"); else for (i = 1; i < mp_ncpus; i++) CPU_SET(i, &all_cpus); } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } void init_secondary(int cpu) { struct pcpu *pc; uint32_t loop_counter; pmap_set_tex(); cpuinfo_reinit_mmu(pmap_kern_ttb); cpu_setup(); /* Provide stack pointers for other processor modes.
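 *
 * Aside: check_ap() above is a bounded poll, up to two seconds in
 * 1 ms steps, waiting for every AP to bump mp_naps. The generic
 * shape of that idiom, with illustrative names (TIMEOUT_MS and
 * all_aps_up() are hypothetical):
 *
 *	for (ms = 0; ms < TIMEOUT_MS; ms++) {
 *		if (all_aps_up())
 *			return (0);
 *		DELAY(1000);
 *	}
 *	return (ETIMEDOUT);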
*/ set_stackptrs(cpu); enable_interrupts(PSR_A); pc = &__pcpu[cpu]; /* * pcpu_init() updates the queue, so it should not be executed in parallel * on several cores. */ while (mp_naps < (cpu - 1)) ; pcpu_init(pc, cpu, sizeof(struct pcpu)); dpcpu_init(dpcpu[cpu - 1], cpu); -#if __ARM_ARCH >= 6 && defined(DDB) +#if defined(DDB) dbg_monitor_init_secondary(); #endif /* Signal our startup to BSP */ atomic_add_rel_32(&mp_naps, 1); /* Spin until the BSP releases the APs */ while (!atomic_load_acq_int(&aps_ready)) { #if __ARM_ARCH >= 7 __asm __volatile("wfe"); #endif } /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pc->pc_curthread = pc->pc_idlethread; pc->pc_curpcb = pc->pc_idlethread->td_pcb; set_curthread(pc->pc_idlethread); #ifdef VFP vfp_init(); #endif /* Configure the interrupt controller */ intr_pic_init_secondary(); /* Apply possible BP hardening */ cpuinfo_init_bp_hardening(); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPIs, TLB shootdown, freezes, etc. */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); loop_counter = 0; while (smp_started == 0) { DELAY(100); loop_counter++; if (loop_counter == 1000) CTR0(KTR_SMP, "AP still waiting for smp_started"); } /* Start per-CPU event timers. */ cpu_initclocks_ap(); CTR0(KTR_SMP, "go into scheduler"); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_stop(void *dummy __unused) { u_int cpu; /* * IPI_STOP_HARD is mapped to IPI_STOP. */ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped.
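 *
 * The resulting order, schematically (the requester side lives in
 * the dump path, see minidumpsys() earlier in this change):
 *
 *	stopped core:	dcache_wbinv_poc_all();
 *			CPU_SET_ATOMIC(cpu, &stopped_cpus);
 *	requester:	wait until stopped_cpus covers the others,
 *			then flush L2 once and take the dump.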
*/ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); } static void ipi_preempt(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(td); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } static void ipi_hardclock(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } static void release_aps(void *dummy __unused) { uint32_t loop_counter; if (mp_ncpus == 1) return; intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); atomic_store_rel_int(&aps_ready, 1); /* Wake the other threads up */ dsb(); sev(); printf("Release APs\n"); for (loop_counter = 0; loop_counter < 2000; loop_counter++) { if (smp_started) return; DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); struct cpu_group * cpu_topo(void) { return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0)); } void cpu_mp_setmaxid(void) { platform_mp_setmaxid(); } /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(other_cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); intr_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } Index: head/sys/arm/arm/nexus.c =================================================================== --- head/sys/arm/arm/nexus.c (revision 368140) +++ head/sys/arm/arm/nexus.c (revision 368141) @@ -1,471 +1,434 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T.
DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static int nexus_probe(device_t); static int nexus_attach(device_t); static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t, u_int, const char *, int); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); static bus_dma_tag_t nexus_get_dma_tag(device_t dev, device_t child); -#ifdef INTRNG #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif -#endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol); -#ifdef INTRNG static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); -#endif static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif /* * Normally NULL (which results in defaults which are handled in * busdma_machdep), platform init code can use nexus_set_dma_tag() to set this * to a tag that will be inherited by all busses and devices on the platform. 
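 *
 * A minimal sketch of the intended use from platform init code (the
 * tag parameters here are illustrative only, not a recommendation):
 *
 *	bus_dma_tag_t tag;
 *
 *	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
 *	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
 *	    &tag) == 0)
 *		nexus_set_dma_tag(tag);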
*/ static bus_dma_tag_t nexus_dma_tag; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD(bus_get_dma_tag, nexus_get_dma_tag), -#ifdef INTRNG DEVMETHOD(bus_describe_intr, nexus_describe_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif -#endif #ifdef FDT DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif { 0, 0 } }; static devclass_t nexus_devclass; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_EARLY); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_probe mem_rman"); /* * First, deal with the children we know about already */ bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) 
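 *
 * From a child driver the request takes the usual form, e.g. ("sc"
 * and the rid value are illustrative):
 *
 *	int rid = 0;
 *
 *	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *	if (sc->mem_res == NULL)
 *		return (ENXIO);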
*/ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct rman *rm; int needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rm = &mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (0); } } return (rv); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { int error; if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } return (rman_release_resource(res)); } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { #ifdef FDT return(fdtbus_bs_tag); #else return((void *)1); #endif } static bus_dma_tag_t nexus_get_dma_tag(device_t dev, device_t child) { return nexus_dma_tag; } void nexus_set_dma_tag(bus_dma_tag_t tag) { nexus_dma_tag = tag; } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { int ret = ENODEV; -#ifdef INTRNG device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); ret = EOPNOTSUPP; -#else - if (arm_config_irq) - ret = (*arm_config_irq)(irq, trig, pol); -#endif return (ret); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { -#ifndef INTRNG - int irq; -#endif if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; -#ifdef INTRNG return(intr_setup_irq(child, res, filt, intr, arg, flags, cookiep)); -#else - for (irq = rman_get_start(res); irq <= rman_get_end(res); irq++) { - arm_setup_irqhandler(device_get_nameunit(child), - filt, intr, arg, irq, flags, cookiep); - arm_unmask_irq(irq); - } - return (0); -#endif } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { -#ifdef INTRNG return (intr_teardown_irq(child, r, ih)); -#else - return (arm_remove_irqhandler(rman_get_start(r), ih)); -#endif } -#ifdef INTRNG static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif -#endif static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int err; bus_addr_t paddr; bus_size_t psize; bus_space_handle_t vaddr; if ((err = rman_activate_resource(r)) != 0) return (err); /* * If this is a memory resource, map it into the kernel. 
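 *
 * After activation the child can use the recorded tag/handle through
 * the normal accessors, e.g. (the offset is illustrative):
 *
 *	uint32_t v = bus_read_4(sc->mem_res, 0x10);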
*/ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = (bus_addr_t)rman_get_start(r); psize = (bus_size_t)rman_get_size(r); #ifdef FDT err = bus_space_map(fdtbus_bs_tag, paddr, psize, 0, &vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_bustag(r, fdtbus_bs_tag); #else vaddr = (bus_space_handle_t)pmap_mapdev((vm_offset_t)paddr, (vm_size_t)psize); if (vaddr == 0) { rman_deactivate_resource(r); return (ENOMEM); } rman_set_bustag(r, (void *)1); #endif rman_set_virtual(r, (void *)vaddr); rman_set_bushandle(r, vaddr); return (0); } else if (type == SYS_RES_IRQ) { -#ifdef INTRNG err = intr_activate_irq(child, r); if (err != 0) { rman_deactivate_resource(r); return (err); } -#endif } return (0); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_size_t psize; bus_space_handle_t vaddr; if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { psize = (bus_size_t)rman_get_size(r); vaddr = rman_get_bushandle(r); if (vaddr != 0) { #ifdef FDT bus_space_unmap(fdtbus_bs_tag, vaddr, psize); #else pmap_unmapdev((vm_offset_t)vaddr, (vm_size_t)psize); #endif rman_set_virtual(r, NULL); rman_set_bushandle(r, 0); } } else if (type == SYS_RES_IRQ) { -#ifdef INTRNG intr_deactivate_irq(child, r); -#endif } return (rman_deactivate_resource(r)); } #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { -#ifndef INTRNG - return (intr_fdt_map_irq(iparent, intr, icells)); -#else u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); -#endif /* INTRNG */ } #endif /* FDT */ Index: head/sys/arm/arm/stdatomic.c =================================================================== --- head/sys/arm/arm/stdatomic.c (revision 368140) +++ head/sys/arm/arm/stdatomic.c (revision 368141) @@ -1,880 +1,429 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 Ed Schouten * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include /* * Executing statements with interrupts disabled. */ #if defined(_KERNEL) && !defined(SMP) #define WITHOUT_INTERRUPTS(s) do { \ register_t regs; \ \ regs = intr_disable(); \ do s while (0); \ intr_restore(regs); \ } while (0) #endif /* _KERNEL && !SMP */ /* * Memory barriers. * * It turns out __sync_synchronize() does not emit any code when used * with GCC 4.2. Implement our own version that does work reliably. * * Although __sync_lock_test_and_set() should only perform an acquire * barrier, make it do a full barrier like the other functions. This * should make 's atomic_exchange_explicit() work reliably. */ #if defined(_KERNEL) && !defined(SMP) static inline void do_sync(void) { __asm volatile ("" : : : "memory"); } -#elif __ARM_ARCH >= 6 +#else static inline void do_sync(void) { dmb(); } #endif -#if defined(__CLANG_ATOMICS) || defined(__GNUC_ATOMICS) -/* - * New C11 __atomic_* API. - */ - -/* ARMv6+ systems should be supported by the compiler. */ -#if __ARM_ARCH <= 5 - -/* Clang doesn't allow us to reimplement builtins without this. */ -#ifdef __clang__ -#pragma redefine_extname __sync_synchronize_ext __sync_synchronize -#define __sync_synchronize __sync_synchronize_ext -#endif - -void -__sync_synchronize(void) -{ -} - -#ifdef _KERNEL - -#ifdef SMP -#error "On SMP systems we should have proper atomic operations." -#endif - -/* - * On uniprocessor systems, we can perform the atomic operations by - * disabling interrupts. 
- */ - -#define EMIT_LOAD_N(N, uintN_t) \ -uintN_t \ -__atomic_load_##N(uintN_t *mem, int model __unused) \ -{ \ - uintN_t ret; \ - \ - WITHOUT_INTERRUPTS({ \ - ret = *mem; \ - }); \ - return (ret); \ -} - -#define EMIT_STORE_N(N, uintN_t) \ -void \ -__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused) \ -{ \ - \ - WITHOUT_INTERRUPTS({ \ - *mem = val; \ - }); \ -} - -#define EMIT_COMPARE_EXCHANGE_N(N, uintN_t) \ -_Bool \ -__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *expected, \ - uintN_t desired, int success __unused, int failure __unused) \ -{ \ - _Bool ret; \ - \ - WITHOUT_INTERRUPTS({ \ - if (*mem == *expected) { \ - *mem = desired; \ - ret = 1; \ - } else { \ - *expected = *mem; \ - ret = 0; \ - } \ - }); \ - return (ret); \ -} - -#define EMIT_FETCH_OP_N(N, uintN_t, name, op) \ -uintN_t \ -__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused) \ -{ \ - uintN_t ret; \ - \ - WITHOUT_INTERRUPTS({ \ - ret = *mem; \ - *mem op val; \ - }); \ - return (ret); \ -} - -#define EMIT_ALL_OPS_N(N, uintN_t) \ -EMIT_LOAD_N(N, uintN_t) \ -EMIT_STORE_N(N, uintN_t) \ -EMIT_COMPARE_EXCHANGE_N(N, uintN_t) \ -EMIT_FETCH_OP_N(N, uintN_t, exchange, =) \ -EMIT_FETCH_OP_N(N, uintN_t, fetch_add, +=) \ -EMIT_FETCH_OP_N(N, uintN_t, fetch_and, &=) \ -EMIT_FETCH_OP_N(N, uintN_t, fetch_or, |=) \ -EMIT_FETCH_OP_N(N, uintN_t, fetch_sub, -=) \ -EMIT_FETCH_OP_N(N, uintN_t, fetch_xor, ^=) - -EMIT_ALL_OPS_N(1, uint8_t) -EMIT_ALL_OPS_N(2, uint16_t) -EMIT_ALL_OPS_N(4, uint32_t) -EMIT_ALL_OPS_N(8, uint64_t) -#undef EMIT_ALL_OPS_N - -#else /* !_KERNEL */ - -/* - * For userspace on uniprocessor systems, we can implement the atomic - * operations by using a Restartable Atomic Sequence. This makes the - * kernel restart the code from the beginning when interrupted. - */ - -#define EMIT_LOAD_N(N, uintN_t) \ -uintN_t \ -__atomic_load_##N(uintN_t *mem, int model __unused) \ -{ \ - \ - return (*mem); \ -} - -#define EMIT_STORE_N(N, uintN_t) \ -void \ -__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused) \ -{ \ - \ - *mem = val; \ -} - -#define EMIT_EXCHANGE_N(N, uintN_t, ldr, str) \ -uintN_t \ -__atomic_exchange_##N(uintN_t *mem, uintN_t val, int model __unused) \ -{ \ - uint32_t old, temp, ras_start; \ - \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%5]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%5, #4]\n" \ - \ - "\t"ldr" %0, %4\n" /* Load old value. */ \ - "\t"str" %3, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. */ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%5]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%5, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (temp) \ - : "r" (val), "m" (*mem), "r" (ras_start)); \ - return (old); \ -} - -#define EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq) \ -_Bool \ -__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *pexpected, \ - uintN_t desired, int success __unused, int failure __unused) \ -{ \ - uint32_t expected, old, temp, ras_start; \ - \ - expected = *pexpected; \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%6]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%6, #4]\n" \ - \ - "\t"ldr" %0, %5\n" /* Load old value. */ \ - "\tcmp %0, %3\n" /* Compare to expected value. */\ - "\t"streq" %4, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. 
*/ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%6]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%6, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (temp) \ - : "r" (expected), "r" (desired), "m" (*mem), \ - "r" (ras_start)); \ - if (old == expected) { \ - return (1); \ - } else { \ - *pexpected = old; \ - return (0); \ - } \ -} - -#define EMIT_FETCH_OP_N(N, uintN_t, ldr, str, name, op, ret) \ -uintN_t \ -__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused) \ -{ \ - uint32_t old, new, ras_start; \ - \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%5]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%5, #4]\n" \ - \ - "\t"ldr" %0, %4\n" /* Load old value. */ \ - "\t"op" %2, %0, %3\n" /* Calculate new value. */ \ - "\t"str" %2, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. */ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%5]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%5, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (new) \ - : "r" (val), "m" (*mem), "r" (ras_start)); \ - return (ret); \ -} - -#define EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq) \ -EMIT_LOAD_N(N, uintN_t) \ -EMIT_STORE_N(N, uintN_t) \ -EMIT_EXCHANGE_N(N, uintN_t, ldr, str) \ -EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_add, "add", old) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_and, "and", old) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_or, "orr", old) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_sub, "sub", old) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_xor, "eor", old) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, add_fetch, "add", new) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, and_fetch, "and", new) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, or_fetch, "orr", new) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, sub_fetch, "sub", new) \ -EMIT_FETCH_OP_N(N, uintN_t, ldr, str, xor_fetch, "eor", new) - -EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq") -EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq") -EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq") -#undef EMIT_ALL_OPS_N - -#endif /* _KERNEL */ - -#endif /* __ARM_ARCH */ - -#endif /* __CLANG_ATOMICS || __GNUC_ATOMICS */ - #if defined(__SYNC_ATOMICS) || defined(EMIT_SYNC_ATOMICS) #ifdef __clang__ #pragma redefine_extname __sync_lock_test_and_set_1_c __sync_lock_test_and_set_1 #pragma redefine_extname __sync_lock_test_and_set_2_c __sync_lock_test_and_set_2 #pragma redefine_extname __sync_lock_test_and_set_4_c __sync_lock_test_and_set_4 #pragma redefine_extname __sync_val_compare_and_swap_1_c __sync_val_compare_and_swap_1 #pragma redefine_extname __sync_val_compare_and_swap_2_c __sync_val_compare_and_swap_2 #pragma redefine_extname __sync_val_compare_and_swap_4_c __sync_val_compare_and_swap_4 #pragma redefine_extname __sync_fetch_and_add_1_c __sync_fetch_and_add_1 #pragma redefine_extname __sync_fetch_and_add_2_c __sync_fetch_and_add_2 #pragma redefine_extname __sync_fetch_and_add_4_c __sync_fetch_and_add_4 #pragma redefine_extname __sync_fetch_and_and_1_c __sync_fetch_and_and_1 #pragma redefine_extname __sync_fetch_and_and_2_c __sync_fetch_and_and_2 #pragma redefine_extname __sync_fetch_and_and_4_c __sync_fetch_and_and_4 #pragma redefine_extname __sync_fetch_and_or_1_c __sync_fetch_and_or_1 #pragma redefine_extname __sync_fetch_and_or_2_c __sync_fetch_and_or_2 #pragma redefine_extname __sync_fetch_and_or_4_c __sync_fetch_and_or_4 #pragma redefine_extname 
__sync_fetch_and_xor_1_c __sync_fetch_and_xor_1 #pragma redefine_extname __sync_fetch_and_xor_2_c __sync_fetch_and_xor_2 #pragma redefine_extname __sync_fetch_and_xor_4_c __sync_fetch_and_xor_4 #pragma redefine_extname __sync_fetch_and_sub_1_c __sync_fetch_and_sub_1 #pragma redefine_extname __sync_fetch_and_sub_2_c __sync_fetch_and_sub_2 #pragma redefine_extname __sync_fetch_and_sub_4_c __sync_fetch_and_sub_4 #endif /* * Old __sync_* API. */ -#if __ARM_ARCH >= 6 /* Implementations for old GCC versions, lacking support for atomics. */ typedef union { uint8_t v8[4]; uint32_t v32; } reg_t; /* * Given a memory address pointing to an 8-bit or 16-bit integer, return * the address of the 32-bit word containing it. */ static inline uint32_t * round_to_word(void *ptr) { return ((uint32_t *)((intptr_t)ptr & ~3)); } /* * Utility functions for loading and storing 8-bit and 16-bit integers * in 32-bit words at an offset corresponding with the location of the * atomic variable. */ static inline void put_1(reg_t *r, const uint8_t *offset_ptr, uint8_t val) { size_t offset; offset = (intptr_t)offset_ptr & 3; r->v8[offset] = val; } static inline uint8_t get_1(const reg_t *r, const uint8_t *offset_ptr) { size_t offset; offset = (intptr_t)offset_ptr & 3; return (r->v8[offset]); } static inline void put_2(reg_t *r, const uint16_t *offset_ptr, uint16_t val) { size_t offset; union { uint16_t in; uint8_t out[2]; } bytes; offset = (intptr_t)offset_ptr & 3; bytes.in = val; r->v8[offset] = bytes.out[0]; r->v8[offset + 1] = bytes.out[1]; } static inline uint16_t get_2(const reg_t *r, const uint16_t *offset_ptr) { size_t offset; union { uint8_t in[2]; uint16_t out; } bytes; offset = (intptr_t)offset_ptr & 3; bytes.in[0] = r->v8[offset]; bytes.in[1] = r->v8[offset + 1]; return (bytes.out); } /* * 8-bit and 16-bit routines. * * These operations are not natively supported by the CPU, so we use * some shifting and bitmasking on top of the 32-bit instructions. */ #define EMIT_LOCK_TEST_AND_SET_N(N, uintN_t) \ uintN_t \ __sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val) \ { \ uint32_t *mem32; \ reg_t val32, negmask, old; \ uint32_t temp1, temp2; \ \ mem32 = round_to_word(mem); \ val32.v32 = 0x00000000; \ put_##N(&val32, mem, val); \ negmask.v32 = 0xffffffff; \ put_##N(&negmask, mem, 0); \ \ do_sync(); \ __asm volatile ( \ "1:" \ "\tldrex %0, %6\n" /* Load old value. */ \ "\tand %2, %5, %0\n" /* Remove the old value. */ \ "\torr %2, %2, %4\n" /* Put in the new value. */ \ "\tstrex %3, %2, %1\n" /* Attempt to store. */ \ "\tcmp %3, #0\n" /* Did it succeed? */ \ "\tbne 1b\n" /* Spin if failed. */ \ : "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1), \ "=&r" (temp2) \ : "r" (val32.v32), "r" (negmask.v32), "m" (*mem32)); \ return (get_##N(&old, mem)); \ } EMIT_LOCK_TEST_AND_SET_N(1, uint8_t) EMIT_LOCK_TEST_AND_SET_N(2, uint16_t) #define EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t) \ uintN_t \ __sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected, \ uintN_t desired) \ { \ uint32_t *mem32; \ reg_t expected32, desired32, posmask, old; \ uint32_t negmask, temp1, temp2; \ \ mem32 = round_to_word(mem); \ expected32.v32 = 0x00000000; \ put_##N(&expected32, mem, expected); \ desired32.v32 = 0x00000000; \ put_##N(&desired32, mem, desired); \ posmask.v32 = 0x00000000; \ put_##N(&posmask, mem, ~0); \ negmask = ~posmask.v32; \ \ do_sync(); \ __asm volatile ( \ "1:" \ "\tldrex %0, %8\n" /* Load old value. */ \ "\tand %2, %6, %0\n" /* Isolate the old value. */ \ "\tcmp %2, %4\n" /* Compare to expected value. 
*/\ "\tbne 2f\n" /* Values are unequal. */ \ "\tand %2, %7, %0\n" /* Remove the old value. */ \ "\torr %2, %5\n" /* Put in the new value. */ \ "\tstrex %3, %2, %1\n" /* Attempt to store. */ \ "\tcmp %3, #0\n" /* Did it succeed? */ \ "\tbne 1b\n" /* Spin if failed. */ \ "2:" \ : "=&r" (old), "=m" (*mem32), "=&r" (temp1), \ "=&r" (temp2) \ : "r" (expected32.v32), "r" (desired32.v32), \ "r" (posmask.v32), "r" (negmask), "m" (*mem32)); \ return (get_##N(&old, mem)); \ } EMIT_VAL_COMPARE_AND_SWAP_N(1, uint8_t) EMIT_VAL_COMPARE_AND_SWAP_N(2, uint16_t) #define EMIT_ARITHMETIC_FETCH_AND_OP_N(N, uintN_t, name, op) \ uintN_t \ __sync_##name##_##N##_c(uintN_t *mem, uintN_t val) \ { \ uint32_t *mem32; \ reg_t val32, posmask, old; \ uint32_t negmask, temp1, temp2; \ \ mem32 = round_to_word(mem); \ val32.v32 = 0x00000000; \ put_##N(&val32, mem, val); \ posmask.v32 = 0x00000000; \ put_##N(&posmask, mem, ~0); \ negmask = ~posmask.v32; \ \ do_sync(); \ __asm volatile ( \ "1:" \ "\tldrex %0, %7\n" /* Load old value. */ \ "\t"op" %2, %0, %4\n" /* Calculate new value. */ \ "\tand %2, %5\n" /* Isolate the new value. */ \ "\tand %3, %6, %0\n" /* Remove the old value. */ \ "\torr %2, %2, %3\n" /* Put in the new value. */ \ "\tstrex %3, %2, %1\n" /* Attempt to store. */ \ "\tcmp %3, #0\n" /* Did it succeed? */ \ "\tbne 1b\n" /* Spin if failed. */ \ : "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1), \ "=&r" (temp2) \ : "r" (val32.v32), "r" (posmask.v32), "r" (negmask), \ "m" (*mem32)); \ return (get_##N(&old, mem)); \ } EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_add, "add") EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_sub, "sub") EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_add, "add") EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_sub, "sub") #define EMIT_BITWISE_FETCH_AND_OP_N(N, uintN_t, name, op, idempotence) \ uintN_t \ __sync_##name##_##N##_c(uintN_t *mem, uintN_t val) \ { \ uint32_t *mem32; \ reg_t val32, old; \ uint32_t temp1, temp2; \ \ mem32 = round_to_word(mem); \ val32.v32 = idempotence ? 0xffffffff : 0x00000000; \ put_##N(&val32, mem, val); \ \ do_sync(); \ __asm volatile ( \ "1:" \ "\tldrex %0, %5\n" /* Load old value. */ \ "\t"op" %2, %4, %0\n" /* Calculate new value. */ \ "\tstrex %3, %2, %1\n" /* Attempt to store. */ \ "\tcmp %3, #0\n" /* Did it succeed? */ \ "\tbne 1b\n" /* Spin if failed. */ \ : "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1), \ "=&r" (temp2) \ : "r" (val32.v32), "m" (*mem32)); \ return (get_##N(&old, mem)); \ } EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_and, "and", 1) EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_or, "orr", 0) EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_xor, "eor", 0) EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_and, "and", 1) EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_or, "orr", 0) EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_xor, "eor", 0) /* * 32-bit routines. */ uint32_t __sync_lock_test_and_set_4_c(uint32_t *mem, uint32_t val) { uint32_t old, temp; do_sync(); __asm volatile ( "1:" "\tldrex %0, %4\n" /* Load old value. */ "\tstrex %2, %3, %1\n" /* Attempt to store. */ "\tcmp %2, #0\n" /* Did it succeed? */ "\tbne 1b\n" /* Spin if failed. */ : "=&r" (old), "=m" (*mem), "=&r" (temp) : "r" (val), "m" (*mem)); return (old); } uint32_t __sync_val_compare_and_swap_4_c(uint32_t *mem, uint32_t expected, uint32_t desired) { uint32_t old, temp; do_sync(); __asm volatile ( "1:" "\tldrex %0, %5\n" /* Load old value. */ "\tcmp %0, %3\n" /* Compare to expected value. */ "\tbne 2f\n" /* Values are unequal. 
*/ "\tstrex %2, %4, %1\n" /* Attempt to store. */ "\tcmp %2, #0\n" /* Did it succeed? */ "\tbne 1b\n" /* Spin if failed. */ "2:" : "=&r" (old), "=m" (*mem), "=&r" (temp) : "r" (expected), "r" (desired), "m" (*mem)); return (old); } #define EMIT_FETCH_AND_OP_4(name, op) \ uint32_t \ __sync_##name##_4##_c(uint32_t *mem, uint32_t val) \ { \ uint32_t old, temp1, temp2; \ \ do_sync(); \ __asm volatile ( \ "1:" \ "\tldrex %0, %5\n" /* Load old value. */ \ "\t"op" %2, %0, %4\n" /* Calculate new value. */ \ "\tstrex %3, %2, %1\n" /* Attempt to store. */ \ "\tcmp %3, #0\n" /* Did it succeed? */ \ "\tbne 1b\n" /* Spin if failed. */ \ : "=&r" (old), "=m" (*mem), "=&r" (temp1), \ "=&r" (temp2) \ : "r" (val), "m" (*mem)); \ return (old); \ } EMIT_FETCH_AND_OP_4(fetch_and_add, "add") EMIT_FETCH_AND_OP_4(fetch_and_and, "and") EMIT_FETCH_AND_OP_4(fetch_and_or, "orr") EMIT_FETCH_AND_OP_4(fetch_and_sub, "sub") EMIT_FETCH_AND_OP_4(fetch_and_xor, "eor") #ifndef __clang__ __strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1); __strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2); __strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4); __strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1); __strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2); __strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4); __strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1); __strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2); __strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4); __strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1); __strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2); __strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4); __strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1); __strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2); __strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4); __strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1); __strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2); __strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4); __strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1); __strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2); __strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4); -#endif - -#else /* __ARM_ARCH < 6 */ - -#ifdef _KERNEL - -#ifdef SMP -#error "On SMP systems we should have proper atomic operations." -#endif - -/* - * On uniprocessor systems, we can perform the atomic operations by - * disabling interrupts. 
- */ - -#define EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t) \ -uintN_t \ -__sync_val_compare_and_swap_##N(uintN_t *mem, uintN_t expected, \ - uintN_t desired) \ -{ \ - uintN_t ret; \ - \ - WITHOUT_INTERRUPTS({ \ - ret = *mem; \ - if (*mem == expected) \ - *mem = desired; \ - }); \ - return (ret); \ -} - -#define EMIT_FETCH_AND_OP_N(N, uintN_t, name, op) \ -uintN_t \ -__sync_##name##_##N(uintN_t *mem, uintN_t val) \ -{ \ - uintN_t ret; \ - \ - WITHOUT_INTERRUPTS({ \ - ret = *mem; \ - *mem op val; \ - }); \ - return (ret); \ -} - -#define EMIT_ALL_OPS_N(N, uintN_t) \ -EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, lock_test_and_set, =) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_add, +=) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_and, &=) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_or, |=) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_sub, -=) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_xor, ^=) - -EMIT_ALL_OPS_N(1, uint8_t) -EMIT_ALL_OPS_N(2, uint16_t) -EMIT_ALL_OPS_N(4, uint32_t) -EMIT_ALL_OPS_N(8, uint64_t) -#undef EMIT_ALL_OPS_N - -#else /* !_KERNEL */ - -/* - * For userspace on uniprocessor systems, we can implement the atomic - * operations by using a Restartable Atomic Sequence. This makes the - * kernel restart the code from the beginning when interrupted. - */ - -#define EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str) \ -uintN_t \ -__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val) \ -{ \ - uint32_t old, temp, ras_start; \ - \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%5]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%5, #4]\n" \ - \ - "\t"ldr" %0, %4\n" /* Load old value. */ \ - "\t"str" %3, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. */ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%5]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%5, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (temp) \ - : "r" (val), "m" (*mem), "r" (ras_start)); \ - return (old); \ -} - -#define EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq) \ -uintN_t \ -__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected, \ - uintN_t desired) \ -{ \ - uint32_t old, temp, ras_start; \ - \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%6]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%6, #4]\n" \ - \ - "\t"ldr" %0, %5\n" /* Load old value. */ \ - "\tcmp %0, %3\n" /* Compare to expected value. */\ - "\t"streq" %4, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. */ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%6]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%6, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (temp) \ - : "r" (expected), "r" (desired), "m" (*mem), \ - "r" (ras_start)); \ - return (old); \ -} - -#define EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, name, op) \ -uintN_t \ -__sync_##name##_##N##_c(uintN_t *mem, uintN_t val) \ -{ \ - uint32_t old, temp, ras_start; \ - \ - ras_start = ARM_RAS_START; \ - __asm volatile ( \ - /* Set up Restartable Atomic Sequence. */ \ - "1:" \ - "\tadr %2, 1b\n" \ - "\tstr %2, [%5]\n" \ - "\tadr %2, 2f\n" \ - "\tstr %2, [%5, #4]\n" \ - \ - "\t"ldr" %0, %4\n" /* Load old value. */ \ - "\t"op" %2, %0, %3\n" /* Calculate new value. */ \ - "\t"str" %2, %1\n" /* Store new value. */ \ - \ - /* Tear down Restartable Atomic Sequence. 
*/ \ - "2:" \ - "\tmov %2, #0x00000000\n" \ - "\tstr %2, [%5]\n" \ - "\tmov %2, #0xffffffff\n" \ - "\tstr %2, [%5, #4]\n" \ - : "=&r" (old), "=m" (*mem), "=&r" (temp) \ - : "r" (val), "m" (*mem), "r" (ras_start)); \ - return (old); \ -} - -#define EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq) \ -EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str) \ -EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq) \ -EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_add, "add") \ -EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_and, "and") \ -EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_or, "orr") \ -EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_sub, "sub") \ -EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_xor, "eor") - -#ifdef __clang__ -EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq") -EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq") -#else -EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "streqb") -EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "streqh") -#endif -EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq") - -#ifndef __clang__ -__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1); -__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2); -__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4); -__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1); -__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2); -__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4); -__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1); -__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2); -__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4); -__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1); -__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2); -__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4); -__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1); -__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2); -__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4); -__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1); -__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2); -__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4); -__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1); -__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2); -__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4); -#endif /* __ARM_ARCH */ - -#endif /* _KERNEL */ - #endif #endif /* __SYNC_ATOMICS */ Index: head/sys/arm/arm/sys_machdep.c =================================================================== --- head/sys/arm/arm/sys_machdep.c (revision 368140) +++ head/sys/arm/arm/sys_machdep.c (revision 368141) @@ -1,244 +1,223 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { int op; char *parms; }; #endif /* Prototypes */ static int arm32_sync_icache (struct thread *, void *); static int arm32_drain_writebuf(struct thread *, void *); -#if __ARM_ARCH >= 6 static int sync_icache(uintptr_t addr, size_t len) { size_t size; vm_offset_t rv; /* * Align starting address to even number because value of "1" * is used as return value for success. */ len += addr & 1; addr &= ~1; /* Break whole range to pages. */ do { size = PAGE_SIZE - (addr & PAGE_MASK); size = min(size, len); rv = dcache_wb_pou_checked(addr, size); if (rv == 1) /* see dcache_wb_pou_checked() */ rv = icache_inv_pou_checked(addr, size); if (rv != 1) { if (!useracc((void *)addr, size, VM_PROT_READ)) { /* Invalid access */ return (rv); } /* Valid but unmapped page - skip it. */ } len -= size; addr += size; } while (len > 0); /* Invalidate branch predictor buffer. */ bpb_inv_all(); return (1); } -#endif static int arm32_sync_icache(struct thread *td, void *args) { struct arm_sync_icache_args ua; int error; ksiginfo_t ksi; -#if __ARM_ARCH >= 6 vm_offset_t rv; -#endif if ((error = copyin(args, &ua, sizeof(ua))) != 0) return (error); if (ua.len == 0) { td->td_retval[0] = 0; return (0); } /* * Validate arguments. Address and length are unsigned, * so we can use wrapped overflow check. */ if (((ua.addr + ua.len) < ua.addr) || ((ua.addr + ua.len) > VM_MAXUSER_ADDRESS)) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_addr = (void *)max(ua.addr, VM_MAXUSER_ADDRESS); trapsignal(td, &ksi); return (EINVAL); } -#if __ARM_ARCH >= 6 rv = sync_icache(ua.addr, ua.len); if (rv != 1) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; ksi.ksi_addr = (void *)rv; trapsignal(td, &ksi); return (EINVAL); } -#else - cpu_icache_sync_range(ua.addr, ua.len); -#endif td->td_retval[0] = 0; return (0); } static int arm32_drain_writebuf(struct thread *td, void *args) { /* No args. 
*/ -#if __ARM_ARCH < 6 - cpu_drain_writebuf(); -#else dsb(); cpu_l2cache_drain_writebuf(); -#endif td->td_retval[0] = 0; return (0); } static int arm32_set_tp(struct thread *td, void *args) { -#if __ARM_ARCH >= 6 set_tls(args); -#else - td->td_md.md_tp = (register_t)args; - *(register_t *)ARM_TP_ADDRESS = (register_t)args; -#endif return (0); } static int arm32_get_tp(struct thread *td, void *args) { -#if __ARM_ARCH >= 6 td->td_retval[0] = (register_t)get_tls(); -#else - td->td_retval[0] = *(register_t *)ARM_TP_ADDRESS; -#endif return (0); } int sysarch(struct thread *td, struct sysarch_args *uap) { int error; #ifdef CAPABILITY_MODE /* * When adding new operations, add a new case statement here to * explicitly indicate whether or not the operation is safe to * perform in capability mode. */ if (IN_CAPABILITY_MODE(td)) { switch (uap->op) { case ARM_SYNC_ICACHE: case ARM_DRAIN_WRITEBUF: case ARM_SET_TP: case ARM_GET_TP: case ARM_GET_VFPSTATE: break; default: #ifdef KTRACE if (KTRPOINT(td, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL); #endif return (ECAPMODE); } } #endif switch (uap->op) { case ARM_SYNC_ICACHE: error = arm32_sync_icache(td, uap->parms); break; case ARM_DRAIN_WRITEBUF: error = arm32_drain_writebuf(td, uap->parms); break; case ARM_SET_TP: error = arm32_set_tp(td, uap->parms); break; case ARM_GET_TP: error = arm32_get_tp(td, uap->parms); break; case ARM_GET_VFPSTATE: error = arm_get_vfpstate(td, uap->parms); break; default: error = EINVAL; break; } return (error); } Index: head/sys/arm/arm/vm_machdep.c =================================================================== --- head/sys/arm/arm/vm_machdep.c (revision 368140) +++ head/sys/arm/arm/vm_machdep.c (revision 368141) @@ -1,360 +1,346 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * struct switchframe and trapframe must both be a multiple of 8 * for correct stack alignment. */ _Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment"); _Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment"); uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ; /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; struct mdproc *mdp2; if ((flags & RFPROC) == 0) return; /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *) (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; #ifdef VFP /* Store actual state of VFP */ if (curthread == td1) { critical_enter(); vfp_store(&td1->td_pcb->pcb_vfpstate, false); critical_exit(); } #endif td2->td_pcb = pcb2; /* Clone td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point to mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2)); /* Point the frame to the stack in front of pcb and copy td1's frame */ td2->td_frame = (struct trapframe *)pcb2 - 1; *td2->td_frame = *td1->td_frame; /* * Create a fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. */ pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2); pcb2->pcb_regs.sf_r4 = (register_t)fork_return; pcb2->pcb_regs.sf_r5 = (register_t)td2; pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline; pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame); -#if __ARM_ARCH >= 6 pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls(); -#endif pcb2->pcb_vfpcpu = -1; pcb2->pcb_vfpstate.fpscr = initial_fpscr; tf = td2->td_frame; tf->tf_spsr &= ~PSR_C; tf->tf_r0 = 0; tf->tf_r1 = 0; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_cspr = PSR_SVC32_MODE; -#if __ARM_ARCH < 6 - td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS; -#endif } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; int fixup; #ifdef __ARMEB__ u_int call; #endif frame = td->td_frame; fixup = 0; #ifdef __ARMEB__ /* * __syscall returns an off_t while most other syscalls return an * int. As an off_t is 64 bits and an int is 32 bits, we need to * place the returned data into r1.
As the lseek and freebsd6_lseek * syscalls also return an off_t, they do not need this fixup. */ call = frame->tf_r7; if (call == SYS___syscall) { register_t *ap = &frame->tf_r0; register_t code = ap[_QUAD_LOWWORD]; fixup = (code != SYS_lseek); } #endif switch (error) { case 0: if (fixup) { frame->tf_r0 = 0; frame->tf_r1 = td->td_retval[0]; } else { frame->tf_r0 = td->td_retval[0]; frame->tf_r1 = td->td_retval[1]; } frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: /* * Reconstruct the pc to point at the swi. */ #if __ARM_ARCH >= 7 if ((frame->tf_spsr & PSR_T) != 0) frame->tf_pc -= THUMB_INSN_SIZE; else #endif frame->tf_pc -= INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_r0 = error; frame->tf_spsr |= PSR_C; /* carry bit */ break; } } /* * Initialize machine state, mostly the pcb and trap frame, for a new * thread about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return; td->td_pcb->pcb_regs.sf_r5 = (register_t)td; td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline; td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame); td->td_frame->tf_spsr &= ~PSR_C; td->td_frame->tf_r0 = 0; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = PSR_SVC32_MODE; } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size); tf->tf_pc = (int)entry; tf->tf_r0 = (int)arg; tf->tf_spsr = PSR_USR32_MODE; } int cpu_set_user_tls(struct thread *td, void *tls_base) { -#if __ARM_ARCH >= 6 td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base; if (td == curthread) set_tls(tls_base); -#else - td->td_md.md_tp = (register_t)tls_base; - if (td == curthread) { - critical_enter(); - *(register_t *)ARM_TP_ADDRESS = (register_t)tls_base; - critical_exit(); - } -#endif return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; /* * Ensure td_frame is aligned to an 8 byte boundary as it will be * placed into the stack pointer, which must be 8 byte aligned in * the ARM EABI. */ td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1; } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_regs.sf_r4 = (register_t)func; /* function */ td->td_pcb->pcb_regs.sf_r5 = (register_t)arg; /* first arg */ } /* * Software interrupt handler for queued VM system processing.
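 * (Illustrative note, not from the original source: on arm this handler * only drains deferred busdma work via busdma_swi() when * busdma_swi_pending is set, as the body below shows.)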
*/ void swi_vm(void *dummy) { if (busdma_swi_pending) busdma_swi(); } void cpu_exit(struct thread *td) { } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } Index: head/sys/arm/freescale/imx/imx_common.c =================================================================== --- head/sys/arm/freescale/imx/imx_common.c (revision 368140) +++ head/sys/arm/freescale/imx/imx_common.c (nonexistent) @@ -1,72 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-3-Clause - * - * Copyright (C) 2008-2011 MARVELL INTERNATIONAL LTD. - * Copyright (c) 2012, 2013 The FreeBSD Foundation - * All rights reserved. - * - * Developed by Semihalf. - * - * Portions of this software were developed by Oleksandr Rybalko - * under sponsorship from the FreeBSD Foundation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of MARVELL nor the names of contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- */ -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#ifndef INTRNG -static int -fdt_intc_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, - int *pol) -{ - - *interrupt = fdt32_to_cpu(intr[0]); - *trig = INTR_TRIGGER_CONFORM; - *pol = INTR_POLARITY_CONFORM; - - return (0); -} - -fdt_pic_decode_t fdt_pic_table[] = { - &fdt_intc_decode_ic, - NULL -}; -#endif /* INTRNG */ Property changes on: head/sys/arm/freescale/imx/imx_common.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/freescale/imx/files.imx5 =================================================================== --- head/sys/arm/freescale/imx/files.imx5 (revision 368140) +++ head/sys/arm/freescale/imx/files.imx5 (revision 368141) @@ -1,56 +1,55 @@ # $FreeBSD$ # Init -arm/freescale/imx/imx_common.c standard arm/freescale/imx/imx_machdep.c standard arm/freescale/imx/imx51_machdep.c optional soc_imx51 arm/freescale/imx/imx53_machdep.c optional soc_imx53 # Special serial console for debugging early boot code #arm/freescale/imx/imx_console.c standard # UART driver (includes serial console support) dev/uart/uart_dev_imx.c optional uart # TrustZone Interrupt Controller arm/freescale/imx/tzic.c standard # IOMUX - external pins multiplexor arm/freescale/imx/imx_iomux.c standard # GPIO arm/freescale/imx/imx_gpio.c optional gpio # Generic Periodic Timer arm/freescale/imx/imx_gpt.c standard # Clock Configuration Manager arm/freescale/imx/imx51_ccm.c standard # i.MX5xx PATA controller dev/ata/chipsets/ata-fsl.c optional imxata # SDHCI/MMC dev/sdhci/fsl_sdhci.c optional sdhci # USB OH3 controller (1 OTG, 3 EHCI) arm/freescale/imx/imx_nop_usbphy.c optional ehci dev/usb/controller/ehci_imx.c optional ehci # Watchdog arm/freescale/imx/imx_wdog.c optional imxwdt # i2c arm/freescale/imx/imx_i2c.c optional fsliic # IPU - Image Processing Unit (frame buffer also) arm/freescale/imx/imx51_ipuv3.c optional sc arm/freescale/imx/imx51_ipuv3_fbd.c optional vt dev/vt/hw/fb/vt_early_fb.c optional vt # Fast Ethernet Controller dev/ffec/if_ffec.c optional ffec # SPI arm/freescale/imx/imx_spi.c optional imx_spi Index: head/sys/arm/include/_align.h =================================================================== --- head/sys/arm/include/_align.h (revision 368140) +++ head/sys/arm/include/_align.h (revision 368141) @@ -1,58 +1,54 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001 David E. O'Brien * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4.
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)param.h 5.8 (Berkeley) 6/28/91 * $FreeBSD$ */ #ifndef _ARM_INCLUDE__ALIGN_H_ #define _ARM_INCLUDE__ALIGN_H_ /* * Round p (pointer or byte index) up to the hardware-required alignment, which * is sufficient for any data type, pointer or numeric. The resulting type * is equivalent to arm's uintptr_t (but is purposely spelled "unsigned" here). */ -#if __ARM_ARCH >= 6 #define _ALIGNBYTES (sizeof(int) - 1) -#else -#define _ALIGNBYTES (sizeof(long long) - 1) -#endif #define _ALIGN(p) (((unsigned)(p) + _ALIGNBYTES) & ~_ALIGNBYTES) #endif /* !_ARM_INCLUDE__ALIGN_H_ */ Index: head/sys/arm/include/armreg.h =================================================================== --- head/sys/arm/include/armreg.h (revision 368140) +++ head/sys/arm/include/armreg.h (revision 368141) @@ -1,485 +1,458 @@ /* $NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1998, 2001 Ben Harris * Copyright (c) 1994-1996 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_ARMREG_H #define MACHINE_ARMREG_H #ifndef _SYS_CDEFS_H_ #error Please include sys/cdefs.h before including machine/armreg.h #endif #define INSN_SIZE 4 #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define PSR_MODE 0x0000001f /* mode mask */ #define PSR_USR32_MODE 0x00000010 #define PSR_FIQ32_MODE 0x00000011 #define PSR_IRQ32_MODE 0x00000012 #define PSR_SVC32_MODE 0x00000013 #define PSR_MON32_MODE 0x00000016 #define PSR_ABT32_MODE 0x00000017 #define PSR_HYP32_MODE 0x0000001a #define PSR_UND32_MODE 0x0000001b #define PSR_SYS32_MODE 0x0000001f #define PSR_32_MODE 0x00000010 #define PSR_T 0x00000020 /* Instruction set bit */ #define PSR_F 0x00000040 /* FIQ disable bit */ #define PSR_I 0x00000080 /* IRQ disable bit */ #define PSR_A 0x00000100 /* Imprecise abort bit */ #define PSR_E 0x00000200 /* Data endianess bit */ #define PSR_GE 0x000f0000 /* Greater than or equal to bits */ #define PSR_J 0x01000000 /* Java bit */ #define PSR_Q 0x08000000 /* Sticky overflow bit */ #define PSR_V 0x10000000 /* Overflow bit */ #define PSR_C 0x20000000 /* Carry bit */ #define PSR_Z 0x40000000 /* Zero bit */ #define PSR_N 0x80000000 /* Negative bit */ #define PSR_FLAGS 0xf0000000 /* Flags mask. */ /* The high-order byte is always the implementor */ #define CPU_ID_IMPLEMENTOR_MASK 0xff000000 #define CPU_ID_ARM_LTD 0x41000000 /* 'A' */ #define CPU_ID_DEC 0x44000000 /* 'D' */ #define CPU_ID_MOTOROLA 0x4D000000 /* 'M' */ #define CPU_ID_QUALCOM 0x51000000 /* 'Q' */ #define CPU_ID_TI 0x54000000 /* 'T' */ #define CPU_ID_MARVELL 0x56000000 /* 'V' */ #define CPU_ID_INTEL 0x69000000 /* 'i' */ #define CPU_ID_FARADAY 0x66000000 /* 'f' */ #define CPU_ID_VARIANT_SHIFT 20 #define CPU_ID_VARIANT_MASK 0x00f00000 /* How to decide what format the CPUID is in. */ #define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000) #define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000) #define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x)) /* On recent ARMs this byte holds the architecture and variant (sub-model) */ #define CPU_ID_ARCH_MASK 0x000f0000 #define CPU_ID_ARCH_V3 0x00000000 #define CPU_ID_ARCH_V4 0x00010000 #define CPU_ID_ARCH_V4T 0x00020000 #define CPU_ID_ARCH_V5 0x00030000 #define CPU_ID_ARCH_V5T 0x00040000 #define CPU_ID_ARCH_V5TE 0x00050000 #define CPU_ID_ARCH_V5TEJ 0x00060000 #define CPU_ID_ARCH_V6 0x00070000 #define CPU_ID_CPUID_SCHEME 0x000f0000 /* Next three nybbles are part number */ #define CPU_ID_PARTNO_MASK 0x0000fff0 /* Intel XScale has sub fields in part number */ #define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */ #define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */ #define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */ /* And finally, the revision number. */ #define CPU_ID_REVISION_MASK 0x0000000f /* Individual CPUs are probably best IDed by everything but the revision. 
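 * For example (illustration only, not in the original source): * if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM926EJS) * matches an ARM926EJ-S at any revision, since only the low revision * nibble is masked off.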
*/ #define CPU_ID_CPU_MASK 0xfffffff0 /* ARM9 and later CPUs */ #define CPU_ID_ARM920T 0x41129200 #define CPU_ID_ARM920T_ALT 0x41009200 #define CPU_ID_ARM922T 0x41029220 #define CPU_ID_ARM926EJS 0x41069260 #define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */ #define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */ #define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */ #define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */ #define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */ #define CPU_ID_ARM1022ES 0x4105a220 #define CPU_ID_ARM1026EJS 0x4106a260 #define CPU_ID_ARM1136JS 0x4107b360 #define CPU_ID_ARM1136JSR1 0x4117b360 #define CPU_ID_ARM1176JZS 0x410fb760 /* CPUs that follow the CPUID scheme */ #define CPU_ID_SCHEME_MASK \ (CPU_ID_IMPLEMENTOR_MASK | CPU_ID_ARCH_MASK | CPU_ID_PARTNO_MASK) #define CPU_ID_CORTEXA5 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc050) #define CPU_ID_CORTEXA7 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc070) #define CPU_ID_CORTEXA8 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc080) #define CPU_ID_CORTEXA8R1 (CPU_ID_CORTEXA8 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA8R2 (CPU_ID_CORTEXA8 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA8R3 (CPU_ID_CORTEXA8 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc090) #define CPU_ID_CORTEXA9R1 (CPU_ID_CORTEXA9 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R2 (CPU_ID_CORTEXA9 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R3 (CPU_ID_CORTEXA9 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R4 (CPU_ID_CORTEXA9 | (4 << CPU_ID_VARIANT_SHIFT)) /* XXX: Cortex-A12 is the old name for this part; it has been renamed the A17 */ #define CPU_ID_CORTEXA12 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0d0) #define CPU_ID_CORTEXA12R0 (CPU_ID_CORTEXA12 | (0 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0f0) #define CPU_ID_CORTEXA15R0 (CPU_ID_CORTEXA15 | (0 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15R1 (CPU_ID_CORTEXA15 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15R2 (CPU_ID_CORTEXA15 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15R3 (CPU_ID_CORTEXA15 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA53 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd030) #define CPU_ID_CORTEXA57 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd070) #define CPU_ID_CORTEXA72 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd080) #define CPU_ID_KRAIT300 (CPU_ID_QUALCOM | CPU_ID_CPUID_SCHEME | 0x06f0) /* Snapdragon S4 Pro/APQ8064 */ #define CPU_ID_KRAIT300R0 (CPU_ID_KRAIT300 | (0 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_KRAIT300R1 (CPU_ID_KRAIT300 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_TI925T 0x54029250 #define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */ #define CPU_ID_MV88FR331 0x56153310 /* Marvell Feroceon 88FR331 Core */ #define CPU_ID_MV88FR571_VD 0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */ /* * The LokiPlus core also has its ID set to 0x41159260, and this define causes * execution of unsupported L2-cache instructions, so it needs to be disabled. * 0x41159260 is a generic ARM926E-S ID.
*/ #ifdef SOC_MV_LOKIPLUS #define CPU_ID_MV88FR571_41 0x00000000 #else #define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */ #endif #define CPU_ID_MV88SV581X_V7 0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_MV88SV584X_V7 0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */ /* Marvell's CPUIDs with ARM ID in implementor field */ #define CPU_ID_ARM_88SV581X_V7 0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_FA526 0x66015260 #define CPU_ID_FA626TE 0x66056260 #define CPU_ID_80200 0x69052000 #define CPU_ID_PXA250 0x69052100 /* sans core revision */ #define CPU_ID_PXA210 0x69052120 #define CPU_ID_PXA250A 0x69052100 /* 1st version Core */ #define CPU_ID_PXA210A 0x69052120 /* 1st version Core */ #define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */ #define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */ #define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */ #define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */ #define CPU_ID_PXA27X 0x69054110 #define CPU_ID_80321_400 0x69052420 #define CPU_ID_80321_600 0x69052430 #define CPU_ID_80321_400_B0 0x69052c20 #define CPU_ID_80321_600_B0 0x69052c30 #define CPU_ID_80219_400 0x69052e20 /* A0 stepping/revision. */ #define CPU_ID_80219_600 0x69052e30 /* A0 stepping/revision. */ #define CPU_ID_81342 0x69056810 #define CPU_ID_IXP425 0x690541c0 #define CPU_ID_IXP425_533 0x690541c0 #define CPU_ID_IXP425_400 0x690541d0 #define CPU_ID_IXP425_266 0x690541f0 #define CPU_ID_IXP435 0x69054040 #define CPU_ID_IXP465 0x69054200 /* CPUID registers */ #define ARM_PFR0_ARM_ISA_MASK 0x0000000f #define ARM_PFR0_THUMB_MASK 0x000000f0 #define ARM_PFR0_THUMB 0x10 #define ARM_PFR0_THUMB2 0x30 #define ARM_PFR0_JAZELLE_MASK 0x00000f00 #define ARM_PFR0_THUMBEE_MASK 0x0000f000 #define ARM_PFR1_ARMV4_MASK 0x0000000f #define ARM_PFR1_SEC_EXT_MASK 0x000000f0 #define ARM_PFR1_MICROCTRL_MASK 0x00000f00 /* * Post-ARM3 CP15 registers: * * 1 Control register * * 2 Translation Table Base * * 3 Domain Access Control * * 4 Reserved * * 5 Fault Status * * 6 Fault Address * * 7 Cache/write-buffer Control * * 8 TLB Control * * 9 Cache Lockdown * * 10 TLB Lockdown * * 11 Reserved * * 12 Reserved * * 13 Process ID (for FCSE) * * 14 Reserved * * 15 Implementation Dependent */ /* Some of the definitions below need cleaning up for V3/V4 architectures */ /* CPU control register (CP15 register 1) */ #define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */ #define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */ #define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */ #define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */ #define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */ #define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */ #define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */ #define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */ #define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */ #define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */ #define CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */ #define CPU_CONTROL_SW_ENABLE 0x00000400 /* SW: SWP instruction enable */ #define CPU_CONTROL_BPRD_ENABLE 0x00000800 /* Z: Branch prediction enable */ #define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */ #define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */ #define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */ 
#define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */ #define CPU_CONTROL_HAF_ENABLE 0x00020000 /* HA: Hardware Access Flag Enable */ #define CPU_CONTROL_FI_ENABLE 0x00200000 /* FI: Low interrupt latency */ #define CPU_CONTROL_UNAL_ENABLE 0x00400000 /* U: unaligned data access */ #define CPU_CONTROL_V6_EXTPAGE 0x00800000 /* XP: ARMv6 extended page tables */ #define CPU_CONTROL_V_ENABLE 0x01000000 /* VE: Interrupt vectors enable */ #define CPU_CONTROL_EX_BEND 0x02000000 /* EE: exception endianness */ #define CPU_CONTROL_L2_ENABLE 0x04000000 /* L2 Cache enabled */ #define CPU_CONTROL_NMFI 0x08000000 /* NMFI: Non maskable FIQ */ #define CPU_CONTROL_TR_ENABLE 0x10000000 /* TRE: TEX Remap */ #define CPU_CONTROL_AF_ENABLE 0x20000000 /* AFE: Access Flag enable */ #define CPU_CONTROL_TE_ENABLE 0x40000000 /* TE: Thumb Exception enable */ #define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE /* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM11X6_AUXCTL_RS 0x00000001 /* return stack */ #define ARM11X6_AUXCTL_DB 0x00000002 /* dynamic branch prediction */ #define ARM11X6_AUXCTL_SB 0x00000004 /* static branch prediction */ #define ARM11X6_AUXCTL_TR 0x00000008 /* MicroTLB replacement strat. */ #define ARM11X6_AUXCTL_EX 0x00000010 /* exclusive L1/L2 cache */ #define ARM11X6_AUXCTL_RA 0x00000020 /* clean entire cache disable */ #define ARM11X6_AUXCTL_RV 0x00000040 /* block transfer cache disable */ #define ARM11X6_AUXCTL_CZ 0x00000080 /* restrict cache size */ /* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1136_AUXCTL_PFI 0x80000000 /* PFI: partial FI mode. */ /* This is an undocumented flag * used to work around a cache bug * in r0 steppings. See errata * 364296. */ /* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1176_AUXCTL_PHD 0x10000000 /* inst. prefetch halting disable */ #define ARM1176_AUXCTL_BFD 0x20000000 /* branch folding disable */ #define ARM1176_AUXCTL_FSD 0x40000000 /* force speculative ops disable */ #define ARM1176_AUXCTL_FIO 0x80000000 /* low intr latency override */ /* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define XSCALE_AUXCTL_K 0x00000001 /* dis.
write buffer coalescing */ #define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */ /* Note: XScale core 3 uses these for LLR DCache attributes */ #define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */ #define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */ #define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */ #define XSCALE_AUXCTL_MD_MASK 0x00000030 /* XScale Core 3 only */ #define XSCALE_AUXCTL_LLR 0x00000400 /* Enable L2 for LLR Cache */ /* Marvell Extra Features Register (CP15 register 1, opcode2 0) */ #define MV_DC_REPLACE_LOCK 0x80000000 /* Replace DCache Lock */ #define MV_DC_STREAM_ENABLE 0x20000000 /* DCache Streaming Switch */ #define MV_WA_ENABLE 0x10000000 /* Enable Write Allocate */ #define MV_L2_PREFETCH_DISABLE 0x01000000 /* L2 Cache Prefetch Disable */ #define MV_L2_INV_EVICT_ERR 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */ #define MV_L2_ENABLE 0x00400000 /* L2 Cache enable */ #define MV_IC_REPLACE_LOCK 0x00080000 /* Replace ICache Lock */ #define MV_BGH_ENABLE 0x00040000 /* Branch Global History Register Enable */ #define MV_BTB_DISABLE 0x00020000 /* Branch Target Buffer Disable */ #define MV_L1_PARERR_ENABLE 0x00010000 /* L1 Parity Error Enable */ /* Cache type register definitions */ #define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */ #define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */ #define CPU_CT_S (1U << 24) /* split cache */ #define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */ #define CPU_CT_FORMAT(x) ((x) >> 29) /* Cache type register definitions for ARM v7 */ #define CPU_CT_IMINLINE(x) ((x) & 0xf) /* I$ min line size */ #define CPU_CT_DMINLINE(x) (((x) >> 16) & 0xf) /* D$ min line size */ #define CPU_CT_CTYPE_WT 0 /* write-through */ #define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */ #define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */ #define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */ #define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */ #define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */ #define CPU_CT_xSIZE_M (1U << 2) /* multiplier */ #define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */ #define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */ #define CPU_CT_ARMV7 0x4 /* ARM v7 Cache type definitions */ #define CPUV7_CT_CTYPE_WT (1U << 31) #define CPUV7_CT_CTYPE_WB (1 << 30) #define CPUV7_CT_CTYPE_RA (1 << 29) #define CPUV7_CT_CTYPE_WA (1 << 28) #define CPUV7_CT_xSIZE_LEN(x) ((x) & 0x7) /* line size */ #define CPUV7_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x3ff) /* associativity */ #define CPUV7_CT_xSIZE_SET(x) (((x) >> 13) & 0x7fff) /* num sets */ #define CPUV7_L2CTLR_NPROC_SHIFT 24 #define CPUV7_L2CTLR_NPROC(r) ((((r) >> CPUV7_L2CTLR_NPROC_SHIFT) & 3) + 1) #define CPU_CLIDR_CTYPE(reg,x) (((reg) >> ((x) * 3)) & 0x7) #define CPU_CLIDR_LOUIS(reg) (((reg) >> 21) & 0x7) #define CPU_CLIDR_LOC(reg) (((reg) >> 24) & 0x7) #define CPU_CLIDR_LOUU(reg) (((reg) >> 27) & 0x7) #define CACHE_ICACHE 1 #define CACHE_DCACHE 2 #define CACHE_SEP_CACHE 3 #define CACHE_UNI_CACHE 4 /* Fault status register definitions */ #define FAULT_USER 0x10 -#if __ARM_ARCH < 6 -#define FAULT_TYPE_MASK 0x0f -#define FAULT_WRTBUF_0 0x00 /* Vector Exception */ -#define FAULT_WRTBUF_1 0x02 /* Terminal Exception */ -#define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */ -#define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */ -#define FAULT_BUSERR_2 0x08 /* External Abort on Non-linefetch -- Section */ -#define
FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */ -#define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */ -#define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */ -#define FAULT_ALIGN_0 0x01 /* Alignment */ -#define FAULT_ALIGN_1 0x03 /* Alignment */ -#define FAULT_TRANS_S 0x05 /* Translation -- Section */ -#define FAULT_TRANS_F 0x06 /* Translation -- Flag */ -#define FAULT_TRANS_P 0x07 /* Translation -- Page */ -#define FAULT_DOMAIN_S 0x09 /* Domain -- Section */ -#define FAULT_DOMAIN_P 0x0b /* Domain -- Page */ -#define FAULT_PERM_S 0x0d /* Permission -- Section */ -#define FAULT_PERM_P 0x0f /* Permission -- Page */ - -#define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */ -#define FAULT_EXTERNAL 0x400 /* External abort (armv6+) */ -#define FAULT_WNR 0x800 /* Write-not-Read access (armv6+) */ - -#else /* __ARM_ARCH < 6 */ - #define FAULT_ALIGN 0x001 /* Alignment Fault */ #define FAULT_DEBUG 0x002 /* Debug Event */ #define FAULT_ACCESS_L1 0x003 /* Access Bit (L1) */ #define FAULT_ICACHE 0x004 /* Instruction cache maintenance */ #define FAULT_TRAN_L1 0x005 /* Translation Fault (L1) */ #define FAULT_ACCESS_L2 0x006 /* Access Bit (L2) */ #define FAULT_TRAN_L2 0x007 /* Translation Fault (L2) */ #define FAULT_EA_PREC 0x008 /* External Abort */ #define FAULT_DOMAIN_L1 0x009 /* Domain Fault (L1) */ #define FAULT_DOMAIN_L2 0x00B /* Domain Fault (L2) */ #define FAULT_EA_TRAN_L1 0x00C /* External Translation Abort (L1) */ #define FAULT_PERM_L1 0x00D /* Permission Fault (L1) */ #define FAULT_EA_TRAN_L2 0x00E /* External Translation Abort (L2) */ #define FAULT_PERM_L2 0x00F /* Permission Fault (L2) */ #define FAULT_TLB_CONFLICT 0x010 /* TLB Conflict Abort */ #define FAULT_EA_IMPREC 0x016 /* Asynchronous External Abort */ #define FAULT_PE_IMPREC 0x018 /* Asynchronous Parity Error */ #define FAULT_PARITY 0x019 /* Parity Error */ #define FAULT_PE_TRAN_L1 0x01C /* Parity Error on Translation (L1) */ #define FAULT_PE_TRAN_L2 0x01E /* Parity Error on Translation (L2) */ #define FSR_TO_FAULT(fsr) (((fsr) & 0xF) | \ ((((fsr) & (1 << 10)) >> (10 - 4)))) #define FSR_LPAE (1 << 9) /* LPAE indicator */ #define FSR_WNR (1 << 11) /* Write-not-Read access */ #define FSR_EXT (1 << 12) /* DECERR/SLVERR for external */ #define FSR_CM (1 << 13) /* Cache maintenance fault */ -#endif /* !__ARM_ARCH < 6 */ /* * Address of the vector page, low and high versions. */ #ifndef __ASSEMBLER__ #define ARM_VECTORS_LOW 0x00000000U #define ARM_VECTORS_HIGH 0xffff0000U #else #define ARM_VECTORS_LOW 0 #define ARM_VECTORS_HIGH 0xffff0000 #endif /* * ARM Instructions * * 3 3 2 2 2 * 1 0 9 8 7 0 * +-------+-------------------------------------------------------+ * | cond | instruction dependent | * |c c c c| | * +-------+-------------------------------------------------------+ */ #define INSN_SIZE 4 /* Always 4 bytes */ #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define INSN_COND_AL 0xe0000000 /* Always condition */ /* ARM register defines */ #define ARM_REG_SIZE 4 #define ARM_REG_NUM_PC 15 #define ARM_REG_NUM_LR 14 #define ARM_REG_NUM_SP 13 #define THUMB_INSN_SIZE 2 /* Some are 4 bytes.
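 * (Illustrative note, an assumption drawn from the Thumb-2 encoding rules * rather than from this file: a 32-bit Thumb-2 encoding is recognized by * its first halfword having bits [15:11] equal to 0b11101, 0b11110, or * 0b11111.)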
*/ /* ARM Hypervisor Related Defines */ #define ARM_CP15_HDCR_HPMN 0x0000001f #endif /* !MACHINE_ARMREG_H */ Index: head/sys/arm/include/asmacros.h =================================================================== --- head/sys/arm/include/asmacros.h (revision 368140) +++ head/sys/arm/include/asmacros.h (revision 368141) @@ -1,65 +1,59 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ASMACROS_H_ #define _MACHINE_ASMACROS_H_ #include #ifdef _KERNEL #ifdef LOCORE -#if __ARM_ARCH >= 6 #define GET_CURTHREAD_PTR(tmp) \ mrc p15, 0, tmp, c13, c0, 4 -#else -#define GET_CURTHREAD_PTR(tmp) \ - ldr tmp, =_C_LABEL(__pcpu);\ - ldr tmp, [tmp, #PC_CURTHREAD] -#endif #define ELFNOTE(section, type, vendor, desctype, descdata...) \ .pushsection section ; \ .balign 4 ; \ .long 2f - 1f /* namesz */ ; \ .long 4f - 3f /* descsz */ ; \ .long type /* type */ ; \ 1: .asciz vendor /* vendor name */ ; \ 2: .balign 4 ; \ 3: desctype descdata /* node */ ; \ 4: .balign 4 ; \ .popsection #endif /* LOCORE */ #endif /* _KERNEL */ #endif /* !_MACHINE_ASMACROS_H_ */ Index: head/sys/arm/include/atomic-v6.h =================================================================== --- head/sys/arm/include/atomic-v6.h (revision 368140) +++ head/sys/arm/include/atomic-v6.h (revision 368141) @@ -1,1035 +1,1033 @@ /* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */ /*- * Copyright (C) 2003-2004 Olivier Houchard * Copyright (C) 1994-1997 Mark Brinicombe * Copyright (C) 1994 Brini * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of Brini may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ATOMIC_V6_H_ #define _MACHINE_ATOMIC_V6_H_ #ifndef _MACHINE_ATOMIC_H_ #error Do not include this file directly, use #endif #if __ARM_ARCH >= 7 #define isb() __asm __volatile("isb" : : : "memory") #define dsb() __asm __volatile("dsb" : : : "memory") #define dmb() __asm __volatile("dmb" : : : "memory") -#elif __ARM_ARCH >= 6 +#else #define isb() __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory") #define dsb() __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory") #define dmb() __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory") -#else -#error Only use this file with ARMv6 and later #endif #define mb() dmb() #define wmb() dmb() #define rmb() dmb() #define ARM_HAVE_ATOMIC64 #define ATOMIC_ACQ_REL_LONG(NAME) \ static __inline void \ atomic_##NAME##_acq_long(__volatile u_long *p, u_long v) \ { \ atomic_##NAME##_long(p, v); \ dmb(); \ } \ \ static __inline void \ atomic_##NAME##_rel_long(__volatile u_long *p, u_long v) \ { \ dmb(); \ atomic_##NAME##_long(p, v); \ } #define ATOMIC_ACQ_REL(NAME, WIDTH) \ static __inline void \ atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\ { \ atomic_##NAME##_##WIDTH(p, v); \ dmb(); \ } \ \ static __inline void \ atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\ { \ dmb(); \ atomic_##NAME##_##WIDTH(p, v); \ } static __inline void atomic_add_32(volatile uint32_t *p, uint32_t val) { uint32_t tmp = 0, tmp2 = 0; __asm __volatile( "1: ldrex %0, [%2] \n" " add %0, %0, %3 \n" " strex %1, %0, [%2] \n" " cmp %1, #0 \n" " it ne \n" " bne 1b \n" : "=&r" (tmp), "+r" (tmp2) ,"+r" (p), "+r" (val) : : "cc", "memory"); } static __inline void atomic_add_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" " adds %Q[tmp], %Q[val] \n" " adc %R[tmp], %R[tmp], %R[val] \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_add_long(volatile u_long *p, u_long val) { atomic_add_32((volatile uint32_t *)p, val); } ATOMIC_ACQ_REL(add, 32) ATOMIC_ACQ_REL(add, 64) ATOMIC_ACQ_REL_LONG(add) static __inline void atomic_clear_32(volatile uint32_t *address, uint32_t setmask) { uint32_t tmp = 0, tmp2 = 0; __asm __volatile( "1: ldrex %0, [%2] \n" " bic %0, %0, %3 \n" " strex %1, %0, [%2] \n" " cmp %1, 
#0 \n" " it ne \n" " bne 1b \n" : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask) : : "cc", "memory"); } static __inline void atomic_clear_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" " bic %Q[tmp], %Q[val] \n" " bic %R[tmp], %R[val] \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_clear_long(volatile u_long *address, u_long setmask) { atomic_clear_32((volatile uint32_t *)address, setmask); } ATOMIC_ACQ_REL(clear, 32) ATOMIC_ACQ_REL(clear, 64) ATOMIC_ACQ_REL_LONG(clear) #define ATOMIC_FCMPSET_CODE(RET, TYPE, SUF) \ { \ TYPE tmp; \ \ __asm __volatile( \ "1: ldrex" SUF " %[tmp], [%[ptr]] \n" \ " ldr" SUF " %[ret], [%[oldv]] \n" \ " teq %[tmp], %[ret] \n" \ " ittee ne \n" \ " str" SUF "ne %[tmp], [%[oldv]] \n" \ " movne %[ret], #0 \n" \ " strex" SUF "eq %[ret], %[newv], [%[ptr]] \n" \ " eorseq %[ret], #1 \n" \ " beq 1b \n" \ : [ret] "=&r" (RET), \ [tmp] "=&r" (tmp) \ : [ptr] "r" (_ptr), \ [oldv] "r" (_old), \ [newv] "r" (_new) \ : "cc", "memory"); \ } #define ATOMIC_FCMPSET_CODE64(RET) \ { \ uint64_t cmp, tmp; \ \ __asm __volatile( \ "1: ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" \ " ldrd %Q[cmp], %R[cmp], [%[oldv]] \n" \ " teq %Q[tmp], %Q[cmp] \n" \ " it eq \n" \ " teqeq %R[tmp], %R[cmp] \n" \ " ittee ne \n" \ " movne %[ret], #0 \n" \ " strdne %[cmp], [%[oldv]] \n" \ " strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]] \n" \ " eorseq %[ret], #1 \n" \ " beq 1b \n" \ : [ret] "=&r" (RET), \ [cmp] "=&r" (cmp), \ [tmp] "=&r" (tmp) \ : [ptr] "r" (_ptr), \ [oldv] "r" (_old), \ [newv] "r" (_new) \ : "cc", "memory"); \ } static __inline int atomic_fcmpset_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint8_t, "b"); return (ret); } #define atomic_fcmpset_8 atomic_fcmpset_8 static __inline int atomic_fcmpset_acq_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint8_t, "b"); dmb(); return (ret); } static __inline int atomic_fcmpset_rel_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new) { int ret; dmb(); ATOMIC_FCMPSET_CODE(ret, uint8_t, "b"); return (ret); } static __inline int atomic_fcmpset_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint16_t, "h"); return (ret); } #define atomic_fcmpset_16 atomic_fcmpset_16 static __inline int atomic_fcmpset_acq_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint16_t, "h"); dmb(); return (ret); } static __inline int atomic_fcmpset_rel_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new) { int ret; dmb(); ATOMIC_FCMPSET_CODE(ret, uint16_t, "h"); return (ret); } static __inline int atomic_fcmpset_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint32_t, ""); return (ret); } static __inline int atomic_fcmpset_acq_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new) { int ret; ATOMIC_FCMPSET_CODE(ret, uint32_t, ""); dmb(); return (ret); } static __inline int atomic_fcmpset_rel_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new) { int ret; dmb(); ATOMIC_FCMPSET_CODE(ret, uint32_t, ""); return (ret); } static __inline int atomic_fcmpset_long(volatile u_long *_ptr, u_long *_old, u_long _new) { int ret; ATOMIC_FCMPSET_CODE(ret, u_long, ""); return (ret); } static 
__inline int atomic_fcmpset_acq_long(volatile u_long *_ptr, u_long *_old, u_long _new) { int ret; ATOMIC_FCMPSET_CODE(ret, u_long, ""); dmb(); return (ret); } static __inline int atomic_fcmpset_rel_long(volatile u_long *_ptr, u_long *_old, u_long _new) { int ret; dmb(); ATOMIC_FCMPSET_CODE(ret, u_long, ""); return (ret); } static __inline int atomic_fcmpset_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new) { int ret; ATOMIC_FCMPSET_CODE64(ret); return (ret); } static __inline int atomic_fcmpset_acq_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new) { int ret; ATOMIC_FCMPSET_CODE64(ret); dmb(); return (ret); } static __inline int atomic_fcmpset_rel_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new) { int ret; dmb(); ATOMIC_FCMPSET_CODE64(ret); return (ret); } #define ATOMIC_CMPSET_CODE(RET, SUF) \ { \ __asm __volatile( \ "1: ldrex" SUF " %[ret], [%[ptr]] \n" \ " teq %[ret], %[oldv] \n" \ " itee ne \n" \ " movne %[ret], #0 \n" \ " strex" SUF "eq %[ret], %[newv], [%[ptr]] \n" \ " eorseq %[ret], #1 \n" \ " beq 1b \n" \ : [ret] "=&r" (RET) \ : [ptr] "r" (_ptr), \ [oldv] "r" (_old), \ [newv] "r" (_new) \ : "cc", "memory"); \ } #define ATOMIC_CMPSET_CODE64(RET) \ { \ uint64_t tmp; \ \ __asm __volatile( \ "1: ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" \ " teq %Q[tmp], %Q[oldv] \n" \ " it eq \n" \ " teqeq %R[tmp], %R[oldv] \n" \ " itee ne \n" \ " movne %[ret], #0 \n" \ " strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]] \n" \ " eorseq %[ret], #1 \n" \ " beq 1b \n" \ : [ret] "=&r" (RET), \ [tmp] "=&r" (tmp) \ : [ptr] "r" (_ptr), \ [oldv] "r" (_old), \ [newv] "r" (_new) \ : "cc", "memory"); \ } static __inline int atomic_cmpset_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, "b"); return (ret); } #define atomic_cmpset_8 atomic_cmpset_8 static __inline int atomic_cmpset_acq_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, "b"); dmb(); return (ret); } static __inline int atomic_cmpset_rel_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new) { int ret; dmb(); ATOMIC_CMPSET_CODE(ret, "b"); return (ret); } static __inline int atomic_cmpset_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, "h"); return (ret); } #define atomic_cmpset_16 atomic_cmpset_16 static __inline int atomic_cmpset_acq_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, "h"); dmb(); return (ret); } static __inline int atomic_cmpset_rel_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new) { int ret; dmb(); ATOMIC_CMPSET_CODE(ret, "h"); return (ret); } static __inline int atomic_cmpset_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, ""); return (ret); } static __inline int atomic_cmpset_acq_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new) { int ret; ATOMIC_CMPSET_CODE(ret, ""); dmb(); return (ret); } static __inline int atomic_cmpset_rel_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new) { int ret; dmb(); ATOMIC_CMPSET_CODE(ret, ""); return (ret); } static __inline int atomic_cmpset_long(volatile u_long *_ptr, u_long _old, u_long _new) { int ret; ATOMIC_CMPSET_CODE(ret, ""); return (ret); } static __inline int atomic_cmpset_acq_long(volatile u_long *_ptr, u_long _old, u_long _new) { int ret; ATOMIC_CMPSET_CODE(ret, ""); dmb(); return (ret); } static __inline int atomic_cmpset_rel_long(volatile u_long *_ptr, u_long _old, u_long _new) { int ret; dmb(); ATOMIC_CMPSET_CODE(ret, ""); return (ret); } 
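/* * Illustration only, not part of this change: a minimal sketch of how the * cmpset/fcmpset primitives above are typically consumed. On failure, * atomic_fcmpset_32() writes the value it observed back through its "old" * pointer, so a retry loop does not need to reload *p itself. The helper * name example_fetch_max_32 is hypothetical. */ static __inline uint32_t example_fetch_max_32(volatile uint32_t *p, uint32_t v) { uint32_t old; old = *p; /* racy snapshot; fcmpset refreshes it on failure */ while (old < v) { if (atomic_fcmpset_32(p, &old, v)) break; /* swapped in v; old holds the prior value */ } return (old); }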
static __inline int atomic_cmpset_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new) { int ret; ATOMIC_CMPSET_CODE64(ret); return (ret); } static __inline int atomic_cmpset_acq_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new) { int ret; ATOMIC_CMPSET_CODE64(ret); dmb(); return (ret); } static __inline int atomic_cmpset_rel_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new) { int ret; dmb(); ATOMIC_CMPSET_CODE64(ret); return (ret); } static __inline uint32_t atomic_fetchadd_32(volatile uint32_t *p, uint32_t val) { uint32_t tmp = 0, tmp2 = 0, ret = 0; __asm __volatile( "1: ldrex %0, [%3] \n" " add %1, %0, %4 \n" " strex %2, %1, [%3] \n" " cmp %2, #0 \n" " it ne \n" " bne 1b \n" : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val) : : "cc", "memory"); return (ret); } static __inline uint64_t atomic_fetchadd_64(volatile uint64_t *p, uint64_t val) { uint64_t ret, tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[ret], %R[ret], [%[ptr]] \n" " adds %Q[tmp], %Q[ret], %Q[val] \n" " adc %R[tmp], %R[ret], %R[val] \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [ret] "=&r" (ret), [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); return (ret); } static __inline u_long atomic_fetchadd_long(volatile u_long *p, u_long val) { return (atomic_fetchadd_32((volatile uint32_t *)p, val)); } static __inline uint32_t atomic_load_acq_32(volatile uint32_t *p) { uint32_t v; v = *p; dmb(); return (v); } static __inline uint64_t atomic_load_64(volatile uint64_t *p) { uint64_t ret; /* * The only way to atomically load 64 bits is with LDREXD which puts the * exclusive monitor into the exclusive state, so reset it to open state * with CLREX because we don't actually need to store anything. 
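 * (Illustrative note, not from the original source: a plain ldrd is not * architecturally guaranteed to be single-copy atomic on pre-LPAE ARMv7 * implementations, which is why even a simple 64-bit load goes through the * exclusive monitor here.)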
*/ __asm __volatile( "ldrexd %Q[ret], %R[ret], [%[ptr]] \n" "clrex \n" : [ret] "=&r" (ret) : [ptr] "r" (p) : "cc", "memory"); return (ret); } static __inline uint64_t atomic_load_acq_64(volatile uint64_t *p) { uint64_t ret; ret = atomic_load_64(p); dmb(); return (ret); } static __inline u_long atomic_load_acq_long(volatile u_long *p) { u_long v; v = *p; dmb(); return (v); } static __inline uint32_t atomic_readandclear_32(volatile uint32_t *p) { uint32_t ret, tmp = 0, tmp2 = 0; __asm __volatile( "1: ldrex %0, [%3] \n" " mov %1, #0 \n" " strex %2, %1, [%3] \n" " cmp %2, #0 \n" " it ne \n" " bne 1b \n" : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p) : : "cc", "memory"); return (ret); } static __inline uint64_t atomic_readandclear_64(volatile uint64_t *p) { uint64_t ret, tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[ret], %R[ret], [%[ptr]] \n" " mov %Q[tmp], #0 \n" " mov %R[tmp], #0 \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [ret] "=&r" (ret), [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p) : "cc", "memory"); return (ret); } static __inline u_long atomic_readandclear_long(volatile u_long *p) { return (atomic_readandclear_32((volatile uint32_t *)p)); } static __inline void atomic_set_32(volatile uint32_t *address, uint32_t setmask) { uint32_t tmp = 0, tmp2 = 0; __asm __volatile( "1: ldrex %0, [%2] \n" " orr %0, %0, %3 \n" " strex %1, %0, [%2] \n" " cmp %1, #0 \n" " it ne \n" " bne 1b \n" : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask) : : "cc", "memory"); } static __inline void atomic_set_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" " orr %Q[tmp], %Q[val] \n" " orr %R[tmp], %R[val] \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_set_long(volatile u_long *address, u_long setmask) { atomic_set_32((volatile uint32_t *)address, setmask); } ATOMIC_ACQ_REL(set, 32) ATOMIC_ACQ_REL(set, 64) ATOMIC_ACQ_REL_LONG(set) static __inline void atomic_subtract_32(volatile uint32_t *p, uint32_t val) { uint32_t tmp = 0, tmp2 = 0; __asm __volatile( "1: ldrex %0, [%2] \n" " sub %0, %0, %3 \n" " strex %1, %0, [%2] \n" " cmp %1, #0 \n" " it ne \n" " bne 1b \n" : "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val) : : "cc", "memory"); } static __inline void atomic_subtract_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; __asm __volatile( "1: \n" " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" " subs %Q[tmp], %Q[val] \n" " sbc %R[tmp], %R[tmp], %R[val] \n" " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [exf] "=&r" (exflag), [tmp] "=&r" (tmp) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_subtract_long(volatile u_long *p, u_long val) { atomic_subtract_32((volatile uint32_t *)p, val); } ATOMIC_ACQ_REL(subtract, 32) ATOMIC_ACQ_REL(subtract, 64) ATOMIC_ACQ_REL_LONG(subtract) static __inline void atomic_store_64(volatile uint64_t *p, uint64_t val) { uint64_t tmp; uint32_t exflag; /* * The only way to atomically store 64 bits is with STREXD, which will * succeed only if paired up with a preceding LDREXD using the same * address, so we read and discard the existing value before storing.
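 * (Editor's note: if another observer writes the location between the
 * LDREXD and the STREXD, the STREXD fails and the loop below retries,
 * so the 64-bit store appears atomic to all observers.)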
*/ __asm __volatile( "1: \n" " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" " strexd %[exf], %Q[val], %R[val], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [tmp] "=&r" (tmp), [exf] "=&r" (exflag) : [ptr] "r" (p), [val] "r" (val) : "cc", "memory"); } static __inline void atomic_store_rel_32(volatile uint32_t *p, uint32_t v) { dmb(); *p = v; } static __inline void atomic_store_rel_64(volatile uint64_t *p, uint64_t val) { dmb(); atomic_store_64(p, val); } static __inline void atomic_store_rel_long(volatile u_long *p, u_long v) { dmb(); *p = v; } static __inline int atomic_testandclear_32(volatile uint32_t *ptr, u_int bit) { int newv, oldv, result; __asm __volatile( " mov ip, #1 \n" " lsl ip, ip, %[bit] \n" /* Done with %[bit] as input, reuse below as output. */ "1: \n" " ldrex %[oldv], [%[ptr]] \n" " bic %[newv], %[oldv], ip \n" " strex %[bit], %[newv], [%[ptr]] \n" " teq %[bit], #0 \n" " it ne \n" " bne 1b \n" " ands %[bit], %[oldv], ip \n" " it ne \n" " movne %[bit], #1 \n" : [bit] "=&r" (result), [oldv] "=&r" (oldv), [newv] "=&r" (newv) : [ptr] "r" (ptr), "[bit]" (bit) : "cc", "ip", "memory"); return (result); } static __inline int atomic_testandclear_int(volatile u_int *p, u_int v) { return (atomic_testandclear_32((volatile uint32_t *)p, v)); } static __inline int atomic_testandclear_long(volatile u_long *p, u_int v) { return (atomic_testandclear_32((volatile uint32_t *)p, v)); } #define atomic_testandclear_long atomic_testandclear_long static __inline int atomic_testandset_32(volatile uint32_t *ptr, u_int bit) { int newv, oldv, result; __asm __volatile( " mov ip, #1 \n" " lsl ip, ip, %[bit] \n" /* Done with %[bit] as input, reuse below as output. */ "1: \n" " ldrex %[oldv], [%[ptr]] \n" " orr %[newv], %[oldv], ip \n" " strex %[bit], %[newv], [%[ptr]] \n" " teq %[bit], #0 \n" " it ne \n" " bne 1b \n" " ands %[bit], %[oldv], ip \n" " it ne \n" " movne %[bit], #1 \n" : [bit] "=&r" (result), [oldv] "=&r" (oldv), [newv] "=&r" (newv) : [ptr] "r" (ptr), "[bit]" (bit) : "cc", "ip", "memory"); return (result); } static __inline int atomic_testandset_int(volatile u_int *p, u_int v) { return (atomic_testandset_32((volatile uint32_t *)p, v)); } static __inline int atomic_testandset_long(volatile u_long *p, u_int v) { return (atomic_testandset_32((volatile uint32_t *)p, v)); } #define atomic_testandset_long atomic_testandset_long static __inline int atomic_testandset_64(volatile uint64_t *p, u_int v) { volatile uint32_t *p32; p32 = (volatile uint32_t *)p; /* Assume little-endian */ if (v >= 32) { v &= 0x1f; p32++; } return (atomic_testandset_32(p32, v)); } static __inline uint32_t atomic_swap_32(volatile uint32_t *p, uint32_t v) { uint32_t ret, exflag; __asm __volatile( "1: ldrex %[ret], [%[ptr]] \n" " strex %[exf], %[val], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [ret] "=&r" (ret), [exf] "=&r" (exflag) : [val] "r" (v), [ptr] "r" (p) : "cc", "memory"); return (ret); } static __inline uint64_t atomic_swap_64(volatile uint64_t *p, uint64_t v) { uint64_t ret; uint32_t exflag; __asm __volatile( "1: ldrexd %Q[ret], %R[ret], [%[ptr]] \n" " strexd %[exf], %Q[val], %R[val], [%[ptr]] \n" " teq %[exf], #0 \n" " it ne \n" " bne 1b \n" : [ret] "=&r" (ret), [exf] "=&r" (exflag) : [val] "r" (v), [ptr] "r" (p) : "cc", "memory"); return (ret); } #undef ATOMIC_ACQ_REL #undef ATOMIC_ACQ_REL_LONG static __inline void atomic_thread_fence_acq(void) { dmb(); } static __inline void atomic_thread_fence_rel(void) { dmb(); } static __inline void atomic_thread_fence_acq_rel(void) { dmb(); } static 
__inline void atomic_thread_fence_seq_cst(void) { dmb(); } #endif /* _MACHINE_ATOMIC_V6_H_ */ Index: head/sys/arm/include/bus.h =================================================================== --- head/sys/arm/include/bus.h (revision 368140) +++ head/sys/arm/include/bus.h (revision 368141) @@ -1,788 +1,784 @@ /* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */ /*- * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1996 Charles M. Hannum. All rights reserved. * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_BUS_H_ #define _MACHINE_BUS_H_ #include /* * int bus_space_map (bus_space_tag_t t, bus_addr_t addr, * bus_size_t size, int flags, bus_space_handle_t *bshp); * * Map a region of bus space. */ #define BUS_SPACE_MAP_CACHEABLE 0x01 #define BUS_SPACE_MAP_LINEAR 0x02 #define BUS_SPACE_MAP_PREFETCHABLE 0x04 /* * Bus space for ARM. * * The functions used most often are grouped together at the beginning to ensure * that all the data fits into a single cache line. The inline implementations * of single read/write access these values a lot. */ struct bus_space { /* Read/write single and barrier: the most commonly used functions. */ uint8_t (*bs_r_1)(bus_space_tag_t, bus_space_handle_t, bus_size_t); uint32_t (*bs_r_4)(bus_space_tag_t, bus_space_handle_t, bus_size_t); void (*bs_w_1)(bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t); void (*bs_w_4)(bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t); void (*bs_barrier)(bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_size_t, int); /* Backlink to parent (if copied), and implementation private data. */ struct bus_space *bs_parent; void *bs_privdata; /* mapping/unmapping */ int (*bs_map) (bus_space_tag_t, bus_addr_t, bus_size_t, int, bus_space_handle_t *); void (*bs_unmap) (bus_space_tag_t, bus_space_handle_t, bus_size_t); int (*bs_subregion) (bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *); /* allocation/deallocation */ int (*bs_alloc) (bus_space_tag_t, bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *); void (*bs_free) (bus_space_tag_t, bus_space_handle_t, bus_size_t); /* Read single, the less commonly used functions. */ uint16_t (*bs_r_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t); uint64_t (*bs_r_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t); /* read multiple */ void (*bs_rm_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rm_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rm_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rm_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* read region */ void (*bs_rr_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rr_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rr_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rr_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* Write single, the less commonly used functions. 
*/ void (*bs_w_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t); void (*bs_w_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t); /* write multiple */ void (*bs_wm_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wm_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wm_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wm_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); /* write region */ void (*bs_wr_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wr_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wr_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wr_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); /* set multiple */ void (*bs_sm_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t); void (*bs_sm_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sm_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sm_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); /* set region */ void (*bs_sr_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t); void (*bs_sr_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sr_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sr_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); /* copy */ void (*bs_c_1) (bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_c_2) (bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_c_4) (bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_c_8) (bus_space_tag_t, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); /* read stream (single) */ uint8_t (*bs_r_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t); uint16_t (*bs_r_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t); uint32_t (*bs_r_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t); uint64_t (*bs_r_8_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t); /* read multiple stream */ void (*bs_rm_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rm_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rm_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rm_8_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* read region stream */ void (*bs_rr_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rr_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rr_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rr_8_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* write stream (single) */ void (*bs_w_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint8_t); void (*bs_w_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint16_t); void (*bs_w_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, uint32_t); void (*bs_w_8_s) 
(bus_space_tag_t, bus_space_handle_t, bus_size_t, uint64_t); /* write multiple stream */ void (*bs_wm_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wm_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wm_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wm_8_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); /* write region stream */ void (*bs_wr_1_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wr_2_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wr_4_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wr_8_s) (bus_space_tag_t, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); }; -#if __ARM_ARCH < 6 -extern bus_space_tag_t arm_base_bs_tag; -#endif - /* * Utility macros; INTERNAL USE ONLY. */ #define __bs_c(a,b) __CONCAT(a,b) #define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size) #define __bs_nonsingle(type, sz, t, h, o, a, c) \ (*(t)->__bs_opname(type,sz))((t), h, o, a, c) #define __bs_set(type, sz, t, h, o, v, c) \ (*(t)->__bs_opname(type,sz))((t), h, o, v, c) #define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \ (*(t)->__bs_opname(c,sz))((t), h1, o1, h2, o2, cnt) #define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s) #define __bs_rs_s(sz, t, h, o) \ (*(t)->__bs_opname_s(r,sz))((t), h, o) #define __bs_ws_s(sz, t, h, o, v) \ (*(t)->__bs_opname_s(w,sz))((t), h, o, v) #define __bs_nonsingle_s(type, sz, t, h, o, a, c) \ (*(t)->__bs_opname_s(type,sz))((t), h, o, a, c) #define __generate_inline_bs_rs(IFN, MBR, TYP) \ static inline TYP \ IFN(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o) \ { \ \ if (__predict_true(t->MBR == NULL)) \ return (*(volatile TYP *)(h + o)); \ else \ return (t->MBR(t, h, o)); \ } #define __generate_inline_bs_ws(IFN, MBR, TYP) \ static inline void \ IFN(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o, TYP v)\ { \ \ if (__predict_true(t->MBR == NULL)) \ *(volatile TYP *)(h + o) = v; \ else \ t->MBR(t, h, o, v); \ } /* * Mapping and unmapping operations. */ #define bus_space_map(t, a, s, c, hp) \ (*(t)->bs_map)((t), (a), (s), (c), (hp)) #define bus_space_unmap(t, h, s) \ (*(t)->bs_unmap)((t), (h), (s)) #define bus_space_subregion(t, h, o, s, hp) \ (*(t)->bs_subregion)((t), (h), (o), (s), (hp)) /* * Allocation and deallocation operations. */ #define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \ (*(t)->bs_alloc)((t), (rs), (re), (s), (a), (b), \ (c), (ap), (hp)) #define bus_space_free(t, h, s) \ (*(t)->bs_free)((t), (h), (s)) /* * Bus barrier operations. */ #define bus_space_barrier(t, h, o, l, f) \ (*(t)->bs_barrier)((t), (h), (o), (l), (f)) #define BUS_SPACE_BARRIER_READ 0x01 #define BUS_SPACE_BARRIER_WRITE 0x02 /* * Bus read (single) operations. */ __generate_inline_bs_rs(bus_space_read_1, bs_r_1, uint8_t); __generate_inline_bs_rs(bus_space_read_2, bs_r_2, uint16_t); __generate_inline_bs_rs(bus_space_read_4, bs_r_4, uint32_t); __generate_inline_bs_rs(bus_space_read_8, bs_r_8, uint64_t); __generate_inline_bs_rs(bus_space_read_stream_1, bs_r_1_s, uint8_t); __generate_inline_bs_rs(bus_space_read_stream_2, bs_r_2_s, uint16_t); __generate_inline_bs_rs(bus_space_read_stream_4, bs_r_4_s, uint32_t); __generate_inline_bs_rs(bus_space_read_stream_8, bs_r_8_s, uint64_t); /* * Bus read multiple operations. 
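 *
 * Editor's illustrative sketch (hypothetical driver, not part of this
 * change): a single-register read paired with a multi read that drains
 * a FIFO from one fixed offset; sc_bst, sc_bsh and the REG_* offsets
 * are invented names for this example only.
 *
 *	uint32_t buf[64], nwords;
 *
 *	nwords = bus_space_read_4(sc->sc_bst, sc->sc_bsh, REG_FIFO_CNT);
 *	nwords = MIN(nwords, nitems(buf));
 *	bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, REG_FIFO_DATA,
 *	    buf, nwords);
 *
 * The single read uses the inline fast path generated above (a direct
 * volatile load whenever the tag's bs_r_4 method is NULL); the multi
 * read always dispatches through the tag's bs_rm_4 method.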
*/ #define bus_space_read_multi_1(t, h, o, a, c) \ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c)) #define bus_space_read_multi_2(t, h, o, a, c) \ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c)) #define bus_space_read_multi_4(t, h, o, a, c) \ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c)) #define bus_space_read_multi_8(t, h, o, a, c) \ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_1(t, h, o, a, c) \ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_2(t, h, o, a, c) \ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_4(t, h, o, a, c) \ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_8(t, h, o, a, c) \ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c)) /* * Bus read region operations. */ #define bus_space_read_region_1(t, h, o, a, c) \ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c)) #define bus_space_read_region_2(t, h, o, a, c) \ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c)) #define bus_space_read_region_4(t, h, o, a, c) \ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c)) #define bus_space_read_region_8(t, h, o, a, c) \ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_1(t, h, o, a, c) \ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_2(t, h, o, a, c) \ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_4(t, h, o, a, c) \ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_8(t, h, o, a, c) \ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c)) /* * Bus write (single) operations. */ __generate_inline_bs_ws(bus_space_write_1, bs_w_1, uint8_t); __generate_inline_bs_ws(bus_space_write_2, bs_w_2, uint16_t); __generate_inline_bs_ws(bus_space_write_4, bs_w_4, uint32_t); __generate_inline_bs_ws(bus_space_write_8, bs_w_8, uint64_t); __generate_inline_bs_ws(bus_space_write_stream_1, bs_w_1_s, uint8_t); __generate_inline_bs_ws(bus_space_write_stream_2, bs_w_2_s, uint16_t); __generate_inline_bs_ws(bus_space_write_stream_4, bs_w_4_s, uint32_t); __generate_inline_bs_ws(bus_space_write_stream_8, bs_w_8_s, uint64_t); /* * Bus write multiple operations. */ #define bus_space_write_multi_1(t, h, o, a, c) \ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c)) #define bus_space_write_multi_2(t, h, o, a, c) \ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c)) #define bus_space_write_multi_4(t, h, o, a, c) \ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c)) #define bus_space_write_multi_8(t, h, o, a, c) \ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_1(t, h, o, a, c) \ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_2(t, h, o, a, c) \ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_4(t, h, o, a, c) \ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_8(t, h, o, a, c) \ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c)) /* * Bus write region operations. 
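 *
 * (Editor's note: the "multi" operations above transfer repeatedly
 * to/from one device offset, e.g. a FIFO data port, while the "region"
 * operations below advance the offset on each transfer, e.g. copying a
 * CPU buffer into a device memory window:
 *
 *	bus_space_write_region_4(t, h, DESC_BASE, desc, nwords);
 *
 * DESC_BASE and desc are hypothetical names for this sketch.)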
*/ #define bus_space_write_region_1(t, h, o, a, c) \ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c)) #define bus_space_write_region_2(t, h, o, a, c) \ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c)) #define bus_space_write_region_4(t, h, o, a, c) \ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c)) #define bus_space_write_region_8(t, h, o, a, c) \ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_1(t, h, o, a, c) \ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_2(t, h, o, a, c) \ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_4(t, h, o, a, c) \ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_8(t, h, o, a, c) \ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c)) /* * Set multiple operations. */ #define bus_space_set_multi_1(t, h, o, v, c) \ __bs_set(sm,1,(t),(h),(o),(v),(c)) #define bus_space_set_multi_2(t, h, o, v, c) \ __bs_set(sm,2,(t),(h),(o),(v),(c)) #define bus_space_set_multi_4(t, h, o, v, c) \ __bs_set(sm,4,(t),(h),(o),(v),(c)) #define bus_space_set_multi_8(t, h, o, v, c) \ __bs_set(sm,8,(t),(h),(o),(v),(c)) /* * Set region operations. */ #define bus_space_set_region_1(t, h, o, v, c) \ __bs_set(sr,1,(t),(h),(o),(v),(c)) #define bus_space_set_region_2(t, h, o, v, c) \ __bs_set(sr,2,(t),(h),(o),(v),(c)) #define bus_space_set_region_4(t, h, o, v, c) \ __bs_set(sr,4,(t),(h),(o),(v),(c)) #define bus_space_set_region_8(t, h, o, v, c) \ __bs_set(sr,8,(t),(h),(o),(v),(c)) /* * Copy operations. */ #define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \ __bs_copy(1, t, h1, o1, h2, o2, c) #define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \ __bs_copy(2, t, h1, o1, h2, o2, c) #define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \ __bs_copy(4, t, h1, o1, h2, o2, c) #define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \ __bs_copy(8, t, h1, o1, h2, o2, c) /* * Macros to provide prototypes for all the functions used in the * bus_space structure */ #define bs_map_proto(f) \ int __bs_c(f,_bs_map) (bus_space_tag_t t, bus_addr_t addr, \ bus_size_t size, int cacheable, bus_space_handle_t *bshp); #define bs_unmap_proto(f) \ void __bs_c(f,_bs_unmap) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t size); #define bs_subregion_proto(f) \ int __bs_c(f,_bs_subregion) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, bus_size_t size, \ bus_space_handle_t *nbshp); #define bs_alloc_proto(f) \ int __bs_c(f,_bs_alloc) (bus_space_tag_t t, bus_addr_t rstart, \ bus_addr_t rend, bus_size_t size, bus_size_t align, \ bus_size_t boundary, int cacheable, bus_addr_t *addrp, \ bus_space_handle_t *bshp); #define bs_free_proto(f) \ void __bs_c(f,_bs_free) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t size); #define bs_mmap_proto(f) \ int __bs_c(f,_bs_mmap) (struct cdev *, vm_offset_t, vm_paddr_t *, int); #define bs_barrier_proto(f) \ void __bs_c(f,_bs_barrier) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, bus_size_t len, int flags); #define bs_r_1_proto(f) \ uint8_t __bs_c(f,_bs_r_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_2_proto(f) \ uint16_t __bs_c(f,_bs_r_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_4_proto(f) \ uint32_t __bs_c(f,_bs_r_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_8_proto(f) \ uint64_t __bs_c(f,_bs_r_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_1_s_proto(f) \ uint8_t __bs_c(f,_bs_r_1_s) (bus_space_tag_t t, 
bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_2_s_proto(f) \ uint16_t __bs_c(f,_bs_r_2_s) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_r_4_s_proto(f) \ uint32_t __bs_c(f,_bs_r_4_s) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset); #define bs_w_1_proto(f) \ void __bs_c(f,_bs_w_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t value); #define bs_w_2_proto(f) \ void __bs_c(f,_bs_w_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t value); #define bs_w_4_proto(f) \ void __bs_c(f,_bs_w_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t value); #define bs_w_8_proto(f) \ void __bs_c(f,_bs_w_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint64_t value); #define bs_w_1_s_proto(f) \ void __bs_c(f,_bs_w_1_s) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t value); #define bs_w_2_s_proto(f) \ void __bs_c(f,_bs_w_2_s) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t value); #define bs_w_4_s_proto(f) \ void __bs_c(f,_bs_w_4_s) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t value); #define bs_rm_1_proto(f) \ void __bs_c(f,_bs_rm_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t *addr, bus_size_t count); #define bs_rm_2_proto(f) \ void __bs_c(f,_bs_rm_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t *addr, bus_size_t count); #define bs_rm_4_proto(f) \ void __bs_c(f,_bs_rm_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t *addr, bus_size_t count); #define bs_rm_8_proto(f) \ void __bs_c(f,_bs_rm_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint64_t *addr, bus_size_t count); #define bs_wm_1_proto(f) \ void __bs_c(f,_bs_wm_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint8_t *addr, bus_size_t count); #define bs_wm_2_proto(f) \ void __bs_c(f,_bs_wm_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint16_t *addr, bus_size_t count); #define bs_wm_4_proto(f) \ void __bs_c(f,_bs_wm_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint32_t *addr, bus_size_t count); #define bs_wm_8_proto(f) \ void __bs_c(f,_bs_wm_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint64_t *addr, bus_size_t count); #define bs_rr_1_proto(f) \ void __bs_c(f, _bs_rr_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t *addr, bus_size_t count); #define bs_rr_2_proto(f) \ void __bs_c(f, _bs_rr_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t *addr, bus_size_t count); #define bs_rr_4_proto(f) \ void __bs_c(f, _bs_rr_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t *addr, bus_size_t count); #define bs_rr_8_proto(f) \ void __bs_c(f, _bs_rr_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint64_t *addr, bus_size_t count); #define bs_wr_1_proto(f) \ void __bs_c(f, _bs_wr_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint8_t *addr, bus_size_t count); #define bs_wr_2_proto(f) \ void __bs_c(f, _bs_wr_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint16_t *addr, bus_size_t count); #define bs_wr_4_proto(f) \ void __bs_c(f, _bs_wr_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint32_t *addr, bus_size_t count); #define bs_wr_8_proto(f) 
\ void __bs_c(f, _bs_wr_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, const uint64_t *addr, bus_size_t count); #define bs_sm_1_proto(f) \ void __bs_c(f,_bs_sm_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t value, bus_size_t count); #define bs_sm_2_proto(f) \ void __bs_c(f,_bs_sm_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t value, bus_size_t count); #define bs_sm_4_proto(f) \ void __bs_c(f,_bs_sm_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t value, bus_size_t count); #define bs_sm_8_proto(f) \ void __bs_c(f,_bs_sm_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint64_t value, bus_size_t count); #define bs_sr_1_proto(f) \ void __bs_c(f,_bs_sr_1) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint8_t value, bus_size_t count); #define bs_sr_2_proto(f) \ void __bs_c(f,_bs_sr_2) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint16_t value, bus_size_t count); #define bs_sr_4_proto(f) \ void __bs_c(f,_bs_sr_4) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint32_t value, bus_size_t count); #define bs_sr_8_proto(f) \ void __bs_c(f,_bs_sr_8) (bus_space_tag_t t, bus_space_handle_t bsh, \ bus_size_t offset, uint64_t value, bus_size_t count); #define bs_c_1_proto(f) \ void __bs_c(f,_bs_c_1) (bus_space_tag_t t, bus_space_handle_t bsh1, \ bus_size_t offset1, bus_space_handle_t bsh2, \ bus_size_t offset2, bus_size_t count); #define bs_c_2_proto(f) \ void __bs_c(f,_bs_c_2) (bus_space_tag_t t, bus_space_handle_t bsh1, \ bus_size_t offset1, bus_space_handle_t bsh2, \ bus_size_t offset2, bus_size_t count); #define bs_c_4_proto(f) \ void __bs_c(f,_bs_c_4) (bus_space_tag_t t, bus_space_handle_t bsh1, \ bus_size_t offset1, bus_space_handle_t bsh2, \ bus_size_t offset2, bus_size_t count); #define bs_c_8_proto(f) \ void __bs_c(f,_bs_c_8) (bus_space_tag_t t, bus_space_handle_t bsh1, \ bus_size_t offset1, bus_space_handle_t bsh2, \ bus_size_t offset2, bus_size_t count); #define bs_protos(f) \ bs_map_proto(f); \ bs_unmap_proto(f); \ bs_subregion_proto(f); \ bs_alloc_proto(f); \ bs_free_proto(f); \ bs_mmap_proto(f); \ bs_barrier_proto(f); \ bs_r_1_proto(f); \ bs_r_2_proto(f); \ bs_r_4_proto(f); \ bs_r_8_proto(f); \ bs_r_1_s_proto(f); \ bs_r_2_s_proto(f); \ bs_r_4_s_proto(f); \ bs_w_1_proto(f); \ bs_w_2_proto(f); \ bs_w_4_proto(f); \ bs_w_8_proto(f); \ bs_w_1_s_proto(f); \ bs_w_2_s_proto(f); \ bs_w_4_s_proto(f); \ bs_rm_1_proto(f); \ bs_rm_2_proto(f); \ bs_rm_4_proto(f); \ bs_rm_8_proto(f); \ bs_wm_1_proto(f); \ bs_wm_2_proto(f); \ bs_wm_4_proto(f); \ bs_wm_8_proto(f); \ bs_rr_1_proto(f); \ bs_rr_2_proto(f); \ bs_rr_4_proto(f); \ bs_rr_8_proto(f); \ bs_wr_1_proto(f); \ bs_wr_2_proto(f); \ bs_wr_4_proto(f); \ bs_wr_8_proto(f); \ bs_sm_1_proto(f); \ bs_sm_2_proto(f); \ bs_sm_4_proto(f); \ bs_sm_8_proto(f); \ bs_sr_1_proto(f); \ bs_sr_2_proto(f); \ bs_sr_4_proto(f); \ bs_sr_8_proto(f); \ bs_c_1_proto(f); \ bs_c_2_proto(f); \ bs_c_4_proto(f); \ bs_c_8_proto(f); void generic_bs_unimplemented(void); #define BS_UNIMPLEMENTED (void *)generic_bs_unimplemented #define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t) #define BUS_SPACE_MAXADDR_24BIT 0xFFFFFF #define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF #define BUS_SPACE_MAXADDR 0xFFFFFFFF #define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFF #define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF #define BUS_SPACE_MAXSIZE 0xFFFFFFFF #define BUS_SPACE_UNRESTRICTED (~0) #define BUS_PEEK_FUNC(width, type) \ static 
inline int \ bus_space_peek_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type *value) \ { \ type tmp; \ tmp = bus_space_read_##width(tag, hnd, offset); \ *value = (type)tmp; \ return (0); \ } BUS_PEEK_FUNC(1, uint8_t) BUS_PEEK_FUNC(2, uint16_t) BUS_PEEK_FUNC(4, uint32_t) BUS_PEEK_FUNC(8, uint64_t) #define BUS_POKE_FUNC(width, type) \ static inline int \ bus_space_poke_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write_##width(tag, hnd, offset, value); \ return (0); \ } BUS_POKE_FUNC(1, uint8_t) BUS_POKE_FUNC(2, uint16_t) BUS_POKE_FUNC(4, uint32_t) BUS_POKE_FUNC(8, uint64_t) #include /* * Get the physical address of a bus space memory-mapped resource. * Doing this as a macro is a temporary solution until a more robust fix is * designed. It also serves to mark the locations needing that fix. */ #define BUS_SPACE_PHYSADDR(res, offs) \ ((u_int)(rman_get_start(res)+(offs))) #endif /* _MACHINE_BUS_H_ */ Index: head/sys/arm/include/bus_dma.h =================================================================== --- head/sys/arm/include/bus_dma.h (revision 368140) +++ head/sys/arm/include/bus_dma.h (revision 368141) @@ -1,102 +1,78 @@ /* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1996 Charles M. Hannum. All rights reserved. * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ARM_BUS_DMA_H #define _ARM_BUS_DMA_H #include #include /* Bus Space DMA macros */ #define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0) -#if defined(_ARM32_BUS_DMA_PRIVATE) && __ARM_ARCH < 6 -/* - * arm32_dma_range - * - * This structure describes a valid DMA range. - */ -struct arm32_dma_range { - bus_addr_t dr_sysbase; /* system base address */ - bus_addr_t dr_busbase; /* appears here on bus */ - bus_size_t dr_len; /* length of range */ -}; - -/* _dm_buftype */ -#define ARM32_BUFTYPE_INVALID 0 -#define ARM32_BUFTYPE_LINEAR 1 -#define ARM32_BUFTYPE_MBUF 2 -#define ARM32_BUFTYPE_UIO 3 -#define ARM32_BUFTYPE_RAW 4 - -struct arm32_dma_range *bus_dma_get_range(void); -int bus_dma_get_range_nb(void); - -#endif /* _ARM32_BUS_DMA_PRIVATE */ - #endif /* _ARM_BUS_DMA_H */ Index: head/sys/arm/include/cpu-v6.h =================================================================== --- head/sys/arm/include/cpu-v6.h (revision 368140) +++ head/sys/arm/include/cpu-v6.h (revision 368141) @@ -1,689 +1,685 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef MACHINE_CPU_V6_H #define MACHINE_CPU_V6_H /* There are no user serviceable parts here, they may change without notice */ #ifndef _KERNEL #error Only include this file in the kernel #endif #include #include #include #include -#if __ARM_ARCH < 6 -#error Only include this file for ARMv6 -#endif - /* * Some kernel modules (dtrace all for example) are compiled * unconditionally with -DSMP. Although it looks like a bug, * handle this case here and in #elif condition in ARM_SMP_UP macro. */ #if __ARM_ARCH <= 6 && defined(SMP) && !defined(KLD_MODULE) #error SMP option is not supported on ARMv6 #endif #if __ARM_ARCH <= 6 && defined(SMP_ON_UP) #error SMP_ON_UP option is only supported on ARMv7+ CPUs #endif #if !defined(SMP) && defined(SMP_ON_UP) #error SMP option must be defined for SMP_ON_UP option #endif #define CPU_ASID_KERNEL 0 #if defined(SMP_ON_UP) #define ARM_SMP_UP(smp_code, up_code) \ do { \ if (cpuinfo.mp_ext != 0) { \ smp_code; \ } else { \ up_code; \ } \ } while (0) #elif defined(SMP) && __ARM_ARCH > 6 #define ARM_SMP_UP(smp_code, up_code) \ do { \ smp_code; \ } while (0) #else #define ARM_SMP_UP(smp_code, up_code) \ do { \ up_code; \ } while (0) #endif void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */ vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t); vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t); #ifdef DEV_PMU #include #define PMU_OVSR_C 0x80000000 /* Cycle Counter */ extern uint32_t ccnt_hi[MAXCPU]; extern int pmu_attched; #endif /* DEV_PMU */ #define sev() __asm __volatile("sev" : : : "memory") #define wfe() __asm __volatile("wfe" : : : "memory") /* * Macros to generate CP15 (system control processor) read/write functions. */ #define _FX(s...) #s #define _RF0(fname, aname...) \ static __inline uint32_t \ fname(void) \ { \ uint32_t reg; \ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _R64F0(fname, aname) \ static __inline uint64_t \ fname(void) \ { \ uint64_t reg; \ __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _WF0(fname, aname...) \ static __inline void \ fname(void) \ { \ __asm __volatile("mcr\t" _FX(aname)); \ } #define _WF1(fname, aname...) \ static __inline void \ fname(uint32_t reg) \ { \ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ } #define _W64F1(fname, aname...) \ static __inline void \ fname(uint64_t reg) \ { \ __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \ } /* * Raw CP15 maintenance operations * !!! not for external use !!! 
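 *
 * (Editor's orientation sketch: each _RF0/_WF0/_WF1/_R64F0/_W64F1 line
 * below expands to one tiny inline accessor; e.g.
 *
 *	_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))
 *
 * becomes, schematically,
 *
 *	static __inline void
 *	_CP15_TLBIMVA(uint32_t reg)
 *	{
 *		__asm __volatile("mcr\t p15, 0, %0, c8, c7, 1" :: "r" (reg));
 *	}
 *
 * with the coprocessor operand string supplied by the corresponding
 * CP15_* macro from the sysreg header.)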
*/ /* TLB */ _WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */ #if __ARM_ARCH >= 7 && defined(SMP) _WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */ #endif _WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */ #if __ARM_ARCH >= 7 && defined(SMP) _WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */ #endif _WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */ #if __ARM_ARCH >= 7 && defined(SMP) _WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */ #endif _WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */ _WF1(_CP15_TTB_SET, CP15_TTBR0(%0)) /* Cache and Branch predictor */ _WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */ #if __ARM_ARCH >= 7 && defined(SMP) _WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */ #endif _WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */ _WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */ _WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */ _WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */ #if __ARM_ARCH >= 7 _WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */ #endif _WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */ _WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */ _WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */ _WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */ #if __ARM_ARCH >= 7 && defined(SMP) _WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */ #endif _WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate */ /* * Publicly accessible functions */ /* CP14 Debug Registers */ _RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0)) _RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0)) _RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0)) _RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0)) _RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0)) _WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0)) _WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0)) _WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0)) _WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0)) /* Various control registers */ _RF0(cp15_cpacr_get, CP15_CPACR(%0)) _WF1(cp15_cpacr_set, CP15_CPACR(%0)) _RF0(cp15_dfsr_get, CP15_DFSR(%0)) _RF0(cp15_ifsr_get, CP15_IFSR(%0)) _WF1(cp15_prrr_set, CP15_PRRR(%0)) _WF1(cp15_nmrr_set, CP15_NMRR(%0)) _RF0(cp15_ttbr_get, CP15_TTBR0(%0)) _RF0(cp15_dfar_get, CP15_DFAR(%0)) #if __ARM_ARCH >= 7 _RF0(cp15_ifar_get, CP15_IFAR(%0)) _RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0)) #endif _RF0(cp15_actlr_get, CP15_ACTLR(%0)) _WF1(cp15_actlr_set, CP15_ACTLR(%0)) _WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0)) _WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0)) _WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0)) _WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0)) _RF0(cp15_par_get, CP15_PAR(%0)) _RF0(cp15_sctlr_get, CP15_SCTLR(%0)) /*CPU id registers */ _RF0(cp15_midr_get, CP15_MIDR(%0)) _RF0(cp15_ctr_get, CP15_CTR(%0)) _RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) _RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) _RF0(cp15_mpidr_get, CP15_MPIDR(%0)) _RF0(cp15_revidr_get, CP15_REVIDR(%0)) _RF0(cp15_ccsidr_get, CP15_CCSIDR(%0)) _RF0(cp15_clidr_get, CP15_CLIDR(%0)) _RF0(cp15_aidr_get, CP15_AIDR(%0)) _WF1(cp15_csselr_set, CP15_CSSELR(%0)) _RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0)) _RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0)) 
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0)) _RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0)) _RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0)) _RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0)) _RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0)) _RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0)) _RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0)) _RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0)) _RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0)) _RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0)) _RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0)) _RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0)) _RF0(cp15_cbar_get, CP15_CBAR(%0)) /* Performance Monitor registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) #elif __ARM_ARCH > 6 _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0)) _RF0(cp15_pmovsr_get, CP15_PMOVSR(%0)) _WF1(cp15_pmovsr_set, CP15_PMOVSR(%0)) _WF1(cp15_pmswinc_set, CP15_PMSWINC(%0)) _RF0(cp15_pmselr_get, CP15_PMSELR(%0)) _WF1(cp15_pmselr_set, CP15_PMSELR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) _RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0)) _WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0)) _RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0)) _WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0)) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pminten_get, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_set, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0)) #endif _RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0)) _WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0)) _RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0)) _WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0)) _RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0)) _WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0)) /* Generic Timer registers - only use when you know the hardware is available */ _RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0)) _WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0)) _RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0)) _WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0)) _RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0)) _WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0)) _RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0)) _WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0)) _RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0)) _WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0)) _RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0)) _WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0)) _RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0)) _WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0)) _RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0)) _WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0)) _RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0)) _WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0)) _R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0)) _R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0)) _R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0)) _W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0)) _R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0)) _W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0)) _R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0)) _W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0)) _R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0)) _W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0)) #undef _FX #undef _RF0 #undef _WF0 #undef _WF1 /* * TLB maintenance operations. */ /* Local (i.e. not broadcasting ) operations. 
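 *
 * (Editor's note: the *_local variants below act only on the executing
 * core; the broadcasting versions further down use the inner-shareable
 * (*IS) CP15 encodings on SMP-capable ARMv7, so the maintenance
 * operation is observed by all cores in the inner shareable domain.)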
*/ /* Flush all TLB entries (even global). */ static __inline void tlb_flush_all_local(void) { dsb(); _CP15_TLBIALL(); dsb(); } /* Flush all not global TLB entries. */ static __inline void tlb_flush_all_ng_local(void) { dsb(); _CP15_TLBIASID(CPU_ASID_KERNEL); dsb(); } /* Flush single TLB entry (even global). */ static __inline void tlb_flush_local(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Flush range of TLB entries (even global). */ static __inline void tlb_flush_range_local(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Broadcasting operations. */ #if __ARM_ARCH >= 7 && defined(SMP) static __inline void tlb_flush_all(void) { dsb(); ARM_SMP_UP( _CP15_TLBIALLIS(), _CP15_TLBIALL() ); dsb(); } static __inline void tlb_flush_all_ng(void) { dsb(); ARM_SMP_UP( _CP15_TLBIASIDIS(CPU_ASID_KERNEL), _CP15_TLBIASID(CPU_ASID_KERNEL) ); dsb(); } static __inline void tlb_flush(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); ARM_SMP_UP( _CP15_TLBIMVAAIS(va), _CP15_TLBIMVA(va | CPU_ASID_KERNEL) ); dsb(); } static __inline void tlb_flush_range(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); ARM_SMP_UP( { for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVAAIS(va); }, { for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVA(va | CPU_ASID_KERNEL); } ); dsb(); } #else /* __ARM_ARCH < 7 */ #define tlb_flush_all() tlb_flush_all_local() #define tlb_flush_all_ng() tlb_flush_all_ng_local() #define tlb_flush(va) tlb_flush_local(va) #define tlb_flush_range(va, size) tlb_flush_range_local(va, size) #endif /* __ARM_ARCH < 7 */ /* * Cache maintenance operations. */ /* Sync I and D caches to PoU */ static __inline void icache_sync(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); ARM_SMP_UP( _CP15_ICIALLUIS(), _CP15_ICIALLU() ); dsb(); isb(); } /* Invalidate I cache */ static __inline void icache_inv_all(void) { ARM_SMP_UP( _CP15_ICIALLUIS(), _CP15_ICIALLU() ); dsb(); isb(); } /* Invalidate branch predictor buffer */ static __inline void bpb_inv_all(void) { ARM_SMP_UP( _CP15_BPIALLIS(), _CP15_BPIALL() ); dsb(); isb(); } /* Write back D-cache to PoU */ static __inline void dcache_wb_pou(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); } /* * Invalidate D-cache to PoC * * Caches are invalidated from outermost to innermost as fresh cachelines * flow in this direction. In given range, if there was no dirty cacheline * in any cache before, no stale cacheline should remain in them after this * operation finishes. 
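 *
 * (Editor's usage sketch, assuming a hypothetical receive buffer "buf"
 * of "len" bytes that a DMA engine has just filled:
 *
 *	dcache_inv_poc((vm_offset_t)buf, vtophys(buf), len);
 *
 * after which CPU reads fetch the device's data from memory instead of
 * stale cachelines.)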
*/ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); /* invalidate L2 first */ cpu_l2cache_inv_range(pa, size); /* then L1 */ va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* * Discard D-cache lines to PoC, prior to overwrite by DMA engine. * * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't * flow into L1 while invalidating. This routine is intended to be used only * when invalidating a buffer before a DMA operation loads new data into memory. * The concern in this case is that dirty lines are not evicted to main memory, * overwriting the DMA data. For that reason, the L1 is done first to ensure * that an evicted L1 line doesn't flow to L2 after the L2 has been cleaned. */ static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; /* invalidate L1 first */ dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); /* then L2 */ cpu_l2cache_inv_range(pa, size); } /* * Write back D-cache to PoC * * Caches are written back from innermost to outermost as dirty cachelines * flow in this direction. In given range, no dirty cacheline should remain * in any cache after this operation finishes. */ static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); cpu_l2cache_wb_range(pa, size); } /* Write back and invalidate D-cache to PoC */ static __inline void dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) { vm_offset_t va; vm_offset_t eva = sva + size; dsb(); /* write back L1 first */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); /* then write back and invalidate L2 */ cpu_l2cache_wbinv_range(pa, size); /* then invalidate L1 */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* Set TTB0 register */ static __inline void cp15_ttbr_set(uint32_t reg) { dsb(); _CP15_TTB_SET(reg); dsb(); _CP15_BPIALL(); dsb(); isb(); tlb_flush_all_ng_local(); } /* * Functions for address checking: * * cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access * cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access * cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access * cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access * * They must be called while interrupts are disabled to get consistent result. */ static __inline int cp15_ats1cpr_check(vm_offset_t addr) { cp15_ats1cpr_set(addr); isb(); return (cp15_par_get() & 0x01 ? EFAULT : 0); } static __inline int cp15_ats1cpw_check(vm_offset_t addr) { cp15_ats1cpw_set(addr); isb(); return (cp15_par_get() & 0x01 ? EFAULT : 0); } static __inline int cp15_ats1cur_check(vm_offset_t addr) { cp15_ats1cur_set(addr); isb(); return (cp15_par_get() & 0x01 ? EFAULT : 0); } static __inline int cp15_ats1cuw_check(vm_offset_t addr) { cp15_ats1cuw_set(addr); isb(); return (cp15_par_get() & 0x01 ? 
EFAULT : 0); } #endif /* !MACHINE_CPU_V6_H */ Index: head/sys/arm/include/cpufunc.h =================================================================== --- head/sys/arm/include/cpufunc.h (revision 368140) +++ head/sys/arm/include/cpufunc.h (revision 368141) @@ -1,435 +1,312 @@ /* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufunc.h * * Prototypes for cpu, mmu and tlb related functions. * * $FreeBSD$ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ #ifdef _KERNEL #include #include static __inline void breakpoint(void) { __asm("udf 0xffff"); } struct cpu_functions { /* CPU functions */ -#if __ARM_ARCH < 6 - void (*cf_cpwait) (void); - - /* MMU functions */ - - u_int (*cf_control) (u_int bic, u_int eor); - void (*cf_setttb) (u_int ttb); - - /* TLB functions */ - - void (*cf_tlb_flushID) (void); - void (*cf_tlb_flushID_SE) (u_int va); - void (*cf_tlb_flushD) (void); - void (*cf_tlb_flushD_SE) (u_int va); - - /* - * Cache operations: - * - * We define the following primitives: - * - * icache_sync_range Synchronize I-cache range - * - * dcache_wbinv_all Write-back and Invalidate D-cache - * dcache_wbinv_range Write-back and Invalidate D-cache range - * dcache_inv_range Invalidate D-cache range - * dcache_wb_range Write-back D-cache range - * - * idcache_wbinv_all Write-back and Invalidate D-cache, - * Invalidate I-cache - * idcache_wbinv_range Write-back and Invalidate D-cache, - * Invalidate I-cache range - * - * Note that the ARM term for "write-back" is "clean". We use - * the term "write-back" since it's a more common way to describe - * the operation. - * - * There are some rules that must be followed: - * - * ID-cache Invalidate All: - * Unlike other functions, this one must never write back. 
- * It is used to initialize the MMU when it is in an unknown - * state (such as when it may have lines tagged as valid - * that belong to a previous set of mappings). - * - * I-cache Sync range: - * The goal is to synchronize the instruction stream, - * so you may need to write-back dirty D-cache blocks - * first. If a range is requested, and you can't - * synchronize just a range, you have to hit the whole - * thing. - * - * D-cache Write-Back and Invalidate range: - * If you can't WB-Inv a range, you must WB-Inv the - * entire D-cache. - * - * D-cache Invalidate: - * If you can't Inv the D-cache, you must Write-Back - * and Invalidate. Code that uses this operation - * MUST NOT assume that the D-cache will not be written - * back to memory. - * - * D-cache Write-Back: - * If you can't Write-back without doing an Inv, - * that's fine. Then treat this as a WB-Inv. - * Skipping the invalidate is merely an optimization. - * - * All operations: - * Valid virtual addresses must be passed to each - * cache operation. - */ - void (*cf_icache_sync_range) (vm_offset_t, vm_size_t); - - void (*cf_dcache_wbinv_all) (void); - void (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t); - void (*cf_dcache_inv_range) (vm_offset_t, vm_size_t); - void (*cf_dcache_wb_range) (vm_offset_t, vm_size_t); - - void (*cf_idcache_inv_all) (void); - void (*cf_idcache_wbinv_all) (void); - void (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t); -#endif void (*cf_l2cache_wbinv_all) (void); void (*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_inv_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_wb_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_drain_writebuf) (void); /* Other functions */ -#if __ARM_ARCH < 6 - void (*cf_drain_writebuf) (void); -#endif - void (*cf_sleep) (int mode); -#if __ARM_ARCH < 6 - /* Soft functions */ - - void (*cf_context_switch) (void); -#endif - void (*cf_setup) (void); }; extern struct cpu_functions cpufuncs; extern u_int cputype; -#if __ARM_ARCH < 6 -#define cpu_cpwait() cpufuncs.cf_cpwait() - -#define cpu_control(c, e) cpufuncs.cf_control(c, e) -#define cpu_setttb(t) cpufuncs.cf_setttb(t) - -#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID() -#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e) -#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD() -#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e) - -#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s)) - -#define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all() -#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s)) -#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s)) -#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s)) - -#define cpu_idcache_inv_all() cpufuncs.cf_idcache_inv_all() -#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all() -#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s)) -#endif - #define cpu_l2cache_wbinv_all() cpufuncs.cf_l2cache_wbinv_all() #define cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s)) #define cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s)) #define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s)) #define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf() -#if __ARM_ARCH < 6 -#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf() -#endif #define cpu_sleep(m) cpufuncs.cf_sleep(m) #define cpu_setup() cpufuncs.cf_setup() int set_cpufuncs (void); #define
ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */ #define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */ void cpufunc_nullop (void); u_int cpufunc_control (u_int clear, u_int bic); void cpu_domains (u_int domains); #if defined(CPU_ARM9E) void arm9_tlb_flushID_SE (u_int va); void arm9_context_switch (void); u_int sheeva_control_ext (u_int, u_int); void sheeva_cpu_sleep (int); void sheeva_setttb (u_int); void sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_dcache_inv_range (vm_offset_t, vm_size_t); void sheeva_dcache_wb_range (vm_offset_t, vm_size_t); void sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_inv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wb_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wbinv_all (void); #endif #if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT) void armv7_cpu_sleep (int); #endif #if defined(CPU_MV_PJ4B) void pj4b_config (void); #endif #if defined(CPU_ARM1176) void arm11x6_sleep (int); /* no ref. for errata */ #endif #if defined(CPU_ARM9E) void armv5_ec_setttb(u_int); void armv5_ec_icache_sync_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_wbinv_all(void); void armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t); void armv5_ec_idcache_wbinv_all(void); void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t); void armv4_tlb_flushID (void); void armv4_tlb_flushD (void); void armv4_tlb_flushD_SE (u_int va); void armv4_drain_writebuf (void); void armv4_idcache_inv_all (void); #endif /* * Macros for manipulating CPU interrupts */ -#if __ARM_ARCH < 6 -#define __ARM_INTR_BITS (PSR_I | PSR_F) -#else #define __ARM_INTR_BITS (PSR_I | PSR_F | PSR_A) -#endif static __inline uint32_t __set_cpsr(uint32_t bic, uint32_t eor) { uint32_t tmp, ret; __asm __volatile( "mrs %0, cpsr\n" /* Get the CPSR */ "bic %1, %0, %2\n" /* Clear bits */ "eor %1, %1, %3\n" /* XOR bits */ "msr cpsr_xc, %1\n" /* Set the CPSR */ : "=&r" (ret), "=&r" (tmp) : "r" (bic), "r" (eor) : "memory"); return ret; } static __inline uint32_t disable_interrupts(uint32_t mask) { return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS)); } static __inline uint32_t enable_interrupts(uint32_t mask) { return (__set_cpsr(mask & __ARM_INTR_BITS, 0)); } static __inline uint32_t restore_interrupts(uint32_t old_cpsr) { return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS)); } static __inline register_t intr_disable(void) { return (disable_interrupts(PSR_I | PSR_F)); } static __inline void intr_restore(register_t s) { restore_interrupts(s); } #undef __ARM_INTR_BITS /* * Functions to manipulate cpu r13 * (in arm/arm32/setstack.S) */ void set_stackptr (u_int mode, u_int address); u_int get_stackptr (u_int mode); /* * CPU functions from locore.S */ void cpu_reset (void) __attribute__((__noreturn__)); /* * Cache info variables. 
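 */

/*
 * A minimal usage sketch of the interrupt helpers above (hedged; the
 * critical-section body is a placeholder):
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... code that must run with IRQ and FIQ masked ...
 *	intr_restore(s);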
*/ /* PRIMARY CACHE VARIABLES */ extern int arm_picache_size; extern int arm_picache_line_size; extern int arm_picache_ways; extern int arm_pdcache_size; /* and unified */ extern int arm_pdcache_line_size; extern int arm_pdcache_ways; extern int arm_pcache_type; extern int arm_pcache_unified; extern int arm_dcache_align; extern int arm_dcache_align_mask; extern u_int arm_cache_level; extern u_int arm_cache_loc; extern u_int arm_cache_type[14]; -#if __ARM_ARCH >= 6 #define HAVE_INLINE_FFS static __inline __pure2 int ffs(int mask) { return (__builtin_ffs(mask)); } #define HAVE_INLINE_FFSL static __inline __pure2 int ffsl(long mask) { return (__builtin_ffsl(mask)); } #define HAVE_INLINE_FFSLL static __inline __pure2 int ffsll(long long mask) { return (__builtin_ffsll(mask)); } #define HAVE_INLINE_FLS static __inline __pure2 int fls(int mask) { return (mask == 0 ? 0 : 8 * sizeof(mask) - __builtin_clz((u_int)mask)); } #define HAVE_INLINE_FLSL static __inline __pure2 int flsl(long mask) { return (mask == 0 ? 0 : 8 * sizeof(mask) - __builtin_clzl((u_long)mask)); } #define HAVE_INLINE_FLSLL static __inline __pure2 int flsll(long long mask) { return (mask == 0 ? 0 : 8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask)); } -#endif #else /* !_KERNEL */ static __inline void breakpoint(void) { /* * This matches the instruction used by GDB for software * breakpoints. */ __asm("udf 0xfdee"); } #endif /* _KERNEL */ #endif /* _MACHINE_CPUFUNC_H_ */ /* End of cpufunc.h */ Index: head/sys/arm/include/cpuinfo.h =================================================================== --- head/sys/arm/include/cpuinfo.h (revision 368140) +++ head/sys/arm/include/cpuinfo.h (revision 368141) @@ -1,131 +1,129 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_CPUINFO_H_ #define _MACHINE_CPUINFO_H_ #include #define CPU_IMPLEMENTER_ARM 0x41 #define CPU_IMPLEMENTER_QCOM 0x51 #define CPU_IMPLEMENTER_MRVL 0x56 /* ARM */ #define CPU_ARCH_ARM1176 0xB76 #define CPU_ARCH_CORTEX_A5 0xC05 #define CPU_ARCH_CORTEX_A7 0xC07 #define CPU_ARCH_CORTEX_A8 0xC08 #define CPU_ARCH_CORTEX_A9 0xC09 #define CPU_ARCH_CORTEX_A12 0xC0D #define CPU_ARCH_CORTEX_A15 0xC0F #define CPU_ARCH_CORTEX_A17 0xC11 #define CPU_ARCH_CORTEX_A53 0xD03 #define CPU_ARCH_CORTEX_A57 0xD07 #define CPU_ARCH_CORTEX_A72 0xD08 #define CPU_ARCH_CORTEX_A73 0xD09 #define CPU_ARCH_CORTEX_A75 0xD0A /* QCOM */ #define CPU_ARCH_KRAIT_300 0x06F /* MRVL */ #define CPU_ARCH_SHEEVA_581 0x581 /* PJ4/PJ4B */ #define CPU_ARCH_SHEEVA_584 0x584 /* PJ4B-MP/PJ4C */ struct cpuinfo { /* raw id registers */ uint32_t midr; uint32_t ctr; uint32_t tcmtr; uint32_t tlbtr; uint32_t mpidr; uint32_t revidr; uint32_t id_pfr0; uint32_t id_pfr1; uint32_t id_dfr0; uint32_t id_afr0; uint32_t id_mmfr0; uint32_t id_mmfr1; uint32_t id_mmfr2; uint32_t id_mmfr3; uint32_t id_isar0; uint32_t id_isar1; uint32_t id_isar2; uint32_t id_isar3; uint32_t id_isar4; uint32_t id_isar5; uint32_t cbar; uint32_t ccsidr; uint32_t clidr; /* Parsed bits of above registers... */ /* midr */ int implementer; int revision; int architecture; int part_number; int patch; /* id_mmfr0 */ int outermost_shareability; int shareability_levels; int auxiliary_registers; int innermost_shareability; /* id_mmfr1 */ int mem_barrier; /* id_mmfr3 */ int coherent_walk; int maintenance_broadcast; /* id_pfr1 */ int generic_timer_ext; int virtualization_ext; int security_ext; /* L1 cache info */ int dcache_line_size; int dcache_line_mask; int icache_line_size; int icache_line_mask; /* mpidr */ int mp_ext; }; extern struct cpuinfo cpuinfo; void cpuinfo_init(void); -#if __ARM_ARCH >= 6 void cpuinfo_init_bp_hardening(void); void cpuinfo_reinit_mmu(uint32_t ttb); -#endif #endif /* _MACHINE_CPUINFO_H_ */ Index: head/sys/arm/include/db_machdep.h =================================================================== --- head/sys/arm/include/db_machdep.h (revision 368140) +++ head/sys/arm/include/db_machdep.h (revision 368141) @@ -1,98 +1,94 @@ /*- * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
* * from: FreeBSD: src/sys/i386/include/db_machdep.h,v 1.16 1999/10/04 * $FreeBSD$ */ #ifndef _MACHINE_DB_MACHDEP_H_ #define _MACHINE_DB_MACHDEP_H_ #include #include #include #define T_BREAKPOINT (1) #define T_WATCHPOINT (2) typedef vm_offset_t db_addr_t; typedef int db_expr_t; #define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_regs.sf_pc) #define BKPT_INST (KERNEL_BREAKPOINT) #define BKPT_SIZE (INSN_SIZE) #define BKPT_SET(inst) (BKPT_INST) #define BKPT_SKIP do { \ kdb_frame->tf_pc += BKPT_SIZE; \ } while (0) -#if __ARM_ARCH >= 6 #define db_clear_single_step kdb_cpu_clear_singlestep #define db_set_single_step kdb_cpu_set_singlestep #define db_pc_is_singlestep kdb_cpu_pc_is_singlestep -#else -#define SOFTWARE_SSTEP 1 -#endif #define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT) #define IS_WATCHPOINT_TRAP(type, code) (type == T_WATCHPOINT) #define inst_trap_return(ins) (0) /* ldmxx reg, {..., pc} 01800000 stack mode 000f0000 register 0000ffff register list */ /* mov pc, reg 0000000f register */ #define inst_return(ins) (((ins) & 0x0e108000) == 0x08108000 || \ ((ins) & 0x0ff0fff0) == 0x01a0f000 || \ ((ins) & 0x0ffffff0) == 0x012fff10) /* bx */ /* bl ... 00ffffff offset>>2 */ #define inst_call(ins) (((ins) & 0x0f000000) == 0x0b000000) /* b ... 00ffffff offset>>2 */ /* ldr pc, [pc, reg, lsl #2] 0000000f register */ #define inst_branch(ins) (((ins) & 0x0f000000) == 0x0a000000 || \ ((ins) & 0x0fdffff0) == 0x079ff100 || \ ((ins) & 0x0cd0f000) == 0x0490f000 || \ ((ins) & 0x0ffffff0) == 0x012fff30 || /* blx */ \ ((ins) & 0x0de0f000) == 0x0080f000) #define inst_load(ins) (0) #define inst_store(ins) (0) #define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + INSN_SIZE)) #define DB_ELFSIZE 32 int db_validate_address(vm_offset_t); u_int branch_taken (u_int insn, db_addr_t pc); #endif /* !_MACHINE_DB_MACHDEP_H_ */ Index: head/sys/arm/include/debug_monitor.h =================================================================== --- head/sys/arm/include/debug_monitor.h (revision 368140) +++ head/sys/arm/include/debug_monitor.h (revision 368141) @@ -1,90 +1,60 @@ /*- * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_DEBUG_MONITOR_H_ #define _MACHINE_DEBUG_MONITOR_H_ #ifdef DDB #include enum dbg_access_t { HW_BREAKPOINT_X = 0, HW_WATCHPOINT_R = 1, HW_WATCHPOINT_W = 2, HW_WATCHPOINT_RW = HW_WATCHPOINT_R | HW_WATCHPOINT_W, }; -#if __ARM_ARCH >= 6 void dbg_monitor_init(void); void dbg_monitor_init_secondary(void); void dbg_show_watchpoint(void); int dbg_setup_watchpoint(db_expr_t, db_expr_t, enum dbg_access_t); int dbg_remove_watchpoint(db_expr_t, db_expr_t); void dbg_resume_dbreg(void); -#else /* __ARM_ARCH >= 6 */ -static __inline void -dbg_show_watchpoint(void) -{ -} -static __inline int -dbg_setup_watchpoint(db_expr_t addr __unused, db_expr_t size __unused, - enum dbg_access_t access __unused) -{ - return (ENXIO); -} -static __inline int -dbg_remove_watchpoint(db_expr_t addr __unused, db_expr_t size __unused) -{ - return (ENXIO); -} -static __inline void -dbg_monitor_init(void) -{ -} -static __inline void -dbg_monitor_init_secondary(void) -{ -} -static __inline void -dbg_resume_dbreg(void) -{ -} -#endif /* __ARM_ARCH < 6 */ #else /* DDB */ static __inline void dbg_monitor_init(void) { } #endif #endif /* _MACHINE_DEBUG_MONITOR_H_ */ Index: head/sys/arm/include/fdt.h =================================================================== --- head/sys/arm/include/fdt.h (revision 368140) +++ head/sys/arm/include/fdt.h (revision 368141) @@ -1,54 +1,44 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_FDT_H_ #define _MACHINE_FDT_H_ #include -#ifndef INTRNG - -/* Max interrupt number */ -#define FDT_INTR_MAX NIRQ - -/* Map phandle/intpin pair to global IRQ number */ -#define FDT_MAP_IRQ(node, pin) (pin) - -#endif - /* * Bus space tag. XXX endianness info needs to be derived from the blob.
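 *
 * A hedged usage sketch ("handle" and "offset" are placeholders that would
 * come from a successful bus_space_map() of a device described in the FDT):
 *
 *	uint32_t val;
 *
 *	val = bus_space_read_4(fdtbus_bs_tag, handle, offset);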
*/ extern bus_space_tag_t fdtbus_bs_tag; #endif /* _MACHINE_FDT_H_ */ Index: head/sys/arm/include/frame.h =================================================================== --- head/sys/arm/include/frame.h (revision 368140) +++ head/sys/arm/include/frame.h (revision 368141) @@ -1,139 +1,137 @@ /* $NetBSD: frame.h,v 1.5 2002/10/19 00:10:54 bjh21 Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * frame.h * * Stack frames structures * * Created : 30/09/94 * * $FreeBSD$ * */ #ifndef _MACHINE_FRAME_H_ #define _MACHINE_FRAME_H_ #ifndef _LOCORE #include #include /* * Trap frame. Pushed onto the kernel stack on a trap (synchronous exception). */ struct trapframe { register_t tf_spsr; /* Zero on arm26 */ register_t tf_r0; register_t tf_r1; register_t tf_r2; register_t tf_r3; register_t tf_r4; register_t tf_r5; register_t tf_r6; register_t tf_r7; register_t tf_r8; register_t tf_r9; register_t tf_r10; register_t tf_r11; register_t tf_r12; register_t tf_usr_sp; register_t tf_usr_lr; register_t tf_svc_sp; /* Not used on arm26 */ register_t tf_svc_lr; /* Not used on arm26 */ register_t tf_pc; register_t tf_pad; }; /* Register numbers */ #define tf_r13 tf_usr_sp #define tf_r14 tf_usr_lr #define tf_r15 tf_pc /* * Signal frame. Pushed onto user stack before calling sigcode. * The pointers are used in the trampoline code to locate the ucontext. */ struct sigframe { siginfo_t sf_si; /* actual saved siginfo */ ucontext_t sf_uc; /* actual saved ucontext */ mcontext_vfp_t sf_vfp; /* actual saved VFP context */ }; /* * Switch frame. * * It is important this is a multiple of 8 bytes so the stack is correctly * aligned when we create new threads. 
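 *
 * A compile-time check in that spirit could follow the definition (a
 * sketch only):
 *
 *	_Static_assert(sizeof(struct switchframe) % 8 == 0,
 *	    "struct switchframe size must be a multiple of 8");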
*/ struct switchframe { register_t sf_r4; register_t sf_r5; register_t sf_r6; register_t sf_r7; register_t sf_r8; register_t sf_r9; register_t sf_r10; register_t sf_r11; register_t sf_r12; register_t sf_sp; register_t sf_lr; register_t sf_pc; -#if __ARM_ARCH >= 6 register_t sf_tpidrurw; register_t sf_spare0; -#endif }; /* * Stack frame. Used during stack traces (db_trace.c) */ struct frame { u_int fr_fp; u_int fr_sp; u_int fr_lr; u_int fr_pc; }; #endif /* !_LOCORE */ #endif /* _MACHINE_FRAME_H_ */ Index: head/sys/arm/include/intr.h =================================================================== --- head/sys/arm/include/intr.h (revision 368140) +++ head/sys/arm/include/intr.h (revision 368141) @@ -1,107 +1,69 @@ /* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ #ifdef FDT #include #endif -#ifdef INTRNG - #ifndef NIRQ #define NIRQ 1024 /* XXX - It should be an option. */ #endif #include #ifdef SMP typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); void intr_ipi_dispatch(u_int, struct trapframe *); void intr_ipi_send(cpuset_t, u_int); void intr_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *, intr_ipi_send_t *, void *); int intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); #endif -#else /* INTRNG */ - -/* XXX move to std.* files? 
*/ -#if defined(SOC_MV_DISCOVERY) -#define NIRQ 96 -#elif defined(SOC_MV_KIRKWOOD) -#define NIRQ 64 -#elif defined(CPU_CORTEXA) -#define NIRQ 1020 -#elif defined(CPU_KRAIT) -#define NIRQ 288 -#elif defined(CPU_ARM1176) -#define NIRQ 128 -#else -#define NIRQ 32 -#endif - -int arm_get_next_irq(int); -void arm_mask_irq(uintptr_t); -void arm_unmask_irq(uintptr_t); -void arm_intrnames_init(void); -void arm_setup_irqhandler(const char *, int (*)(void*), void (*)(void*), - void *, int, int, void **); -int arm_remove_irqhandler(int, void *); -extern void (*arm_post_filter)(void *); -extern int (*arm_config_irq)(int irq, enum intr_trigger trig, - enum intr_polarity pol); - -void intr_pic_init_secondary(void); - -#ifdef FDT -int gic_decode_fdt(phandle_t, pcell_t *, int *, int *, int *); -int intr_fdt_map_irq(phandle_t, pcell_t *, int); -#endif - -#endif /* INTRNG */ void arm_irq_memory_barrier(uintptr_t); #endif /* _MACHINE_INTR_H */ Index: head/sys/arm/include/kdb.h =================================================================== --- head/sys/arm/include/kdb.h (revision 368140) +++ head/sys/arm/include/kdb.h (revision 368141) @@ -1,69 +1,57 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include #include #include #define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] -#if __ARM_ARCH >= 6 extern void kdb_cpu_clear_singlestep(void); extern void kdb_cpu_set_singlestep(void); boolean_t kdb_cpu_pc_is_singlestep(db_addr_t); -#else -static __inline void -kdb_cpu_clear_singlestep(void) -{ -} - -static __inline void -kdb_cpu_set_singlestep(void) -{ -} -#endif static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { icache_sync((vm_offset_t)addr, size); } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ Index: head/sys/arm/include/machdep.h =================================================================== --- head/sys/arm/include/machdep.h (revision 368140) +++ head/sys/arm/include/machdep.h (revision 368141) @@ -1,83 +1,76 @@ /* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */ /* $FreeBSD$ */ #ifndef _MACHDEP_BOOT_MACHDEP_H_ #define _MACHDEP_BOOT_MACHDEP_H_ /* Structs that need to be initialised by initarm */ -#if __ARM_ARCH >= 6 extern vm_offset_t irqstack; extern vm_offset_t undstack; extern vm_offset_t abtstack; -#else -struct pv_addr; -extern struct pv_addr irqstack; -extern struct pv_addr undstack; -extern struct pv_addr abtstack; -#endif /* Define various stack sizes in pages */ #define IRQ_STACK_SIZE 1 #define ABT_STACK_SIZE 1 #define UND_STACK_SIZE 1 /* misc prototypes used by the many arm machdeps */ struct trapframe; void arm_lock_cache_line(vm_offset_t); void init_proc0(vm_offset_t kstack); void halt(void); void abort_handler(struct trapframe *, int ); void set_stackptrs(int cpu); void undefinedinstruction_bounce(struct trapframe *); /* Early boot related helper functions */ struct arm_boot_params; vm_offset_t default_parse_boot_param(struct arm_boot_params *abp); vm_offset_t fake_preload_metadata(struct arm_boot_params *abp, void *dtb_ptr, size_t dtb_size); vm_offset_t parse_boot_param(struct arm_boot_params *abp); void arm_parse_fdt_bootargs(void); void arm_print_kenv(void); int arm_get_vfpstate(struct thread *td, void *args); /* Board-specific attributes */ void board_set_serial(uint64_t); void board_set_revision(uint32_t); int arm_predict_branch(void *, u_int, register_t, register_t *, u_int (*)(void*, int), u_int (*)(void*, vm_offset_t, u_int*)); #ifdef PLATFORM typedef void delay_func(int, void *); void arm_set_delay(delay_func *, void *); #endif #ifdef EFI struct efi_map_header; struct mem_region; void arm_add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr, int *mrcnt); #endif /* * Symbols created by ldscript.arm which are accessible in the kernel as global * symbols. They have uint8 type because they mark the byte location where the * corresponding data starts or ends (in the end case, it's the next byte * following the data, so the data size is end-start). These are listed below * in the order they occur within the kernel (i.e., the address of each variable * should be greater than any of the ones before it). 
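 *
 * For example, following the end-start rule above, the size of the text
 * segment is (uintptr_t)&_etext - (uintptr_t)&_start.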
*/ extern uint8_t _start; /* Kernel entry point in locore.S */ extern uint8_t _etext; /* text segment end */ extern uint8_t _extab_start; /* unwind table start */ extern uint8_t _exidx_start; /* unwind index start */ extern uint8_t _exidx_end; /* unwind index end */ extern uint8_t _start_ctors; /* ctors data start */ extern uint8_t _stop_ctors; /* ctors data end */ extern uint8_t _edata; /* data segment end */ extern uint8_t __bss_start; /* bss segment start */ extern uint8_t _ebss; /* bss segment end */ extern uint8_t _end; /* End of kernel (text+ctors+unwind+data+bss) */ #endif /* !_MACHINE_MACHDEP_H_ */ Index: head/sys/arm/include/pcpu.h =================================================================== --- head/sys/arm/include/pcpu.h (revision 368140) +++ head/sys/arm/include/pcpu.h (revision 368141) @@ -1,154 +1,145 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Luoqi Chen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27 * $FreeBSD$ */ #ifndef _MACHINE_PCPU_H_ #define _MACHINE_PCPU_H_ #ifdef _KERNEL #include #include #define ALT_STACK_SIZE 128 struct vmspace; #endif /* _KERNEL */ -#if __ARM_ARCH >= 6 /* Branch predictor hardening method */ #define PCPU_BP_HARDEN_KIND_NONE 0 #define PCPU_BP_HARDEN_KIND_BPIALL 1 #define PCPU_BP_HARDEN_KIND_ICIALLU 2 #define PCPU_MD_FIELDS \ unsigned int pc_vfpsid; \ unsigned int pc_vfpmvfr0; \ unsigned int pc_vfpmvfr1; \ struct pmap *pc_curpmap; \ struct mtx pc_cmap_lock; \ void *pc_cmap1_pte2p; \ void *pc_cmap2_pte2p; \ caddr_t pc_cmap1_addr; \ caddr_t pc_cmap2_addr; \ vm_offset_t pc_qmap_addr; \ void *pc_qmap_pte2p; \ unsigned int pc_dbreg[32]; \ int pc_dbreg_cmd; \ int pc_bp_harden_kind; \ uint32_t pc_original_actlr; \ uint64_t pc_clock; \ char __pad[139] -#else -#define PCPU_MD_FIELDS \ - char __pad[93] -#endif #ifdef _KERNEL #define PC_DBREG_CMD_NONE 0 #define PC_DBREG_CMD_LOAD 1 struct pcb; struct pcpu; extern struct pcpu *pcpup; -#if __ARM_ARCH >= 6 #define CPU_MASK (0xf) #ifndef SMP #define get_pcpu() (pcpup) #else #define get_pcpu() __extension__ ({ \ int id; \ __asm __volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (id)); \ (pcpup + (id & CPU_MASK)); \ }) #endif static inline struct thread * get_curthread(void) { void *ret; __asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (ret)); return (ret); } static inline void set_curthread(struct thread *td) { __asm __volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (td)); } static inline void * get_tls(void) { void *tls; /* TPIDRURW contains the authoritative value. */ __asm __volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (tls)); return (tls); } static inline void set_tls(void *tls) { /* * Update both TPIDRURW and TPIDRURO. TPIDRURW needs to be written * first to ensure that a context switch between the two writes will * still give the desired result of updating both. */ __asm __volatile( "mcr p15, 0, %0, c13, c0, 2\n" "mcr p15, 0, %0, c13, c0, 3\n" : : "r" (tls)); } #define curthread get_curthread() -#else -#define get_pcpu() pcpup -#endif #define PCPU_GET(member) (get_pcpu()->pc_ ## member) #define PCPU_ADD(member, value) (get_pcpu()->pc_ ## member += (value)) #define PCPU_INC(member) PCPU_ADD(member, 1) #define PCPU_PTR(member) (&get_pcpu()->pc_ ## member) #define PCPU_SET(member,value) (get_pcpu()->pc_ ## member = (value)) void pcpu0_init(void); #endif /* _KERNEL */ #endif /* !_MACHINE_PCPU_H_ */ Index: head/sys/arm/include/proc.h =================================================================== --- head/sys/arm/include/proc.h (revision 368140) +++ head/sys/arm/include/proc.h (revision 368141) @@ -1,87 +1,82 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. 
* 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)proc.h 7.1 (Berkeley) 5/15/91 * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29 * $FreeBSD$ */ #ifndef _MACHINE_PROC_H_ #define _MACHINE_PROC_H_ #include struct md_utrap { utrap_entry_t *ut_precise[UT_MAX]; /* must be first */ int ut_refcnt; }; struct mdthread { int md_spinlock_count; /* (k) */ register_t md_saved_cspr; /* (k) */ register_t md_spurflt_addr; /* (k) Spurious page fault address. */ int md_ptrace_instr; int md_ptrace_addr; int md_ptrace_instr_alt; int md_ptrace_addr_alt; -#if __ARM_ARCH < 6 - register_t md_tp; - void *md_ras_start; - void *md_ras_end; -#endif }; struct mdproc { struct md_utrap *md_utrap; void *md_sigtramp; }; #define KINFO_PROC_SIZE 816 #define MAXARGS 8 /* * This holds the syscall state for a single system call. * As some syscall arguments may be 64-bit aligned we need to ensure the * args value is 64-bit aligned. The ABI will then ensure any 64-bit * arguments are already correctly aligned, even if they were passed in * via registers, we just need to make sure we copy them to an aligned * buffer. */ struct syscall_args { u_int code; struct sysent *callp; register_t args[MAXARGS]; } __aligned(8); #endif /* !_MACHINE_PROC_H_ */ Index: head/sys/arm/include/sf_buf.h =================================================================== --- head/sys/arm/include/sf_buf.h (revision 368140) +++ head/sys/arm/include/sf_buf.h (revision 368141) @@ -1,56 +1,48 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Alan L. Cox * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_SF_BUF_H_ #define _MACHINE_SF_BUF_H_ static inline void sf_buf_map(struct sf_buf *sf, int flags) { -#if __ARM_ARCH >= 6 pmap_qenter(sf->kva, &(sf->m), 1); -#else - pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m)); -#endif } static inline int sf_buf_unmap(struct sf_buf *sf) { -#if __ARM_ARCH >= 6 pmap_qremove(sf->kva, 1); -#else - pmap_kremove(sf->kva); -#endif return (1); } #endif /* !_MACHINE_SF_BUF_H_ */ Index: head/sys/arm/include/sysarch.h =================================================================== --- head/sys/arm/include/sysarch.h (revision 368140) +++ head/sys/arm/include/sysarch.h (revision 368141) @@ -1,104 +1,83 @@ /* $NetBSD: sysarch.h,v 1.5 2003/09/11 09:40:12 kleink Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1996-1997 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #ifndef _ARM_SYSARCH_H_ #define _ARM_SYSARCH_H_ #include -/* - * The ARM_TP_ADDRESS points to a special purpose page, which is used as local - * store for the ARM per-thread data and Restartable Atomic Sequences support. - * Put it just above the "high" vectors' page. - * The cpu_switch() code assumes ARM_RAS_START is ARM_TP_ADDRESS + 4, and - * ARM_RAS_END is ARM_TP_ADDRESS + 8, so if that ever changes, be sure to - * update the cpu_switch() (and cpu_throw()) code as well. 
- * In addition, code in arm/include/atomic.h and arm/arm/exception.S - * assumes that ARM_RAS_END is at ARM_RAS_START+4, so be sure to update those - * if ARM_RAS_END moves in relation to ARM_RAS_START (look for occurrences - * of ldr/str rm,[rn, #4]). - */ - -/* ARM_TP_ADDRESS is needed for processors that don't support - * the exclusive-access opcodes introduced with ARMv6K. */ -#if __ARM_ARCH <= 5 -#define ARM_TP_ADDRESS (ARM_VECTORS_HIGH + 0x1000) -#define ARM_RAS_START (ARM_TP_ADDRESS + 4) -#define ARM_RAS_END (ARM_TP_ADDRESS + 8) -#endif - #ifndef LOCORE #ifndef __ASSEMBLER__ /* * Pick up definitions of various __types. */ #include /* * Architecture specific syscalls (arm) */ #define ARM_SYNC_ICACHE 0 #define ARM_DRAIN_WRITEBUF 1 #define ARM_SET_TP 2 #define ARM_GET_TP 3 #define ARM_GET_VFPSTATE 4 struct arm_sync_icache_args { __uintptr_t addr; /* Virtual start address */ __size_t len; /* Region size */ }; struct arm_get_vfpstate_args { __size_t mc_vfp_size; void *mc_vfp; }; #ifndef _KERNEL __BEGIN_DECLS int arm_sync_icache(unsigned int, int); int arm_drain_writebuf(void); int sysarch(int, void *); __END_DECLS #endif #endif /* __ASSEMBLER__ */ #endif /* LOCORE */ #endif /* !_ARM_SYSARCH_H_ */ Index: head/sys/arm/include/sysreg.h =================================================================== --- head/sys/arm/include/sysreg.h (revision 368140) +++ head/sys/arm/include/sysreg.h (revision 368141) @@ -1,335 +1,325 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Macros to make working with the System Control Registers simpler. * * Note that when register r0 is hard-coded in these definitions it means the * cp15 operation neither reads nor writes the register, and r0 is used only * because some syntactically-valid register name has to appear at that point to * keep the asm parser happy.
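 *
 * A hedged sketch of how such a definition is consumed: the operand list is
 * stringified into inline assembly ("_STR"/"_STR2" are hypothetical helper
 * macros; the two levels force macro expansion before stringification):
 *
 *	#define _STR2(s...) #s
 *	#define _STR(s...) _STR2(s)
 *
 *	uint32_t midr;
 *	__asm __volatile("mrc " _STR(CP15_MIDR(%0)) : "=r" (midr));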
*/ #ifndef MACHINE_SYSREG_H #define MACHINE_SYSREG_H /* * CP14 registers */ -#if __ARM_ARCH >= 6 - #define CP14_DBGDIDR(rr) p14, 0, rr, c0, c0, 0 /* Debug ID Register */ #define CP14_DBGDSCRext_V6(rr) p14, 0, rr, c0, c1, 0 /* Debug Status and Ctrl Register v6 */ #define CP14_DBGDSCRext_V7(rr) p14, 0, rr, c0, c2, 2 /* Debug Status and Ctrl Register v7 */ #define CP14_DBGVCR(rr) p14, 0, rr, c0, c7, 0 /* Vector Catch Register */ #define CP14_DBGOSLAR(rr) p14, 0, rr, c1, c0, 4 /* OS Lock Access Register */ #define CP14_DBGOSLSR(rr) p14, 0, rr, c1, c1, 4 /* OS Lock Status Register */ #define CP14_DBGOSDLR(rr) p14, 0, rr, c1, c3, 4 /* OS Double Lock Register */ #define CP14_DBGPRSR(rr) p14, 0, rr, c1, c5, 4 /* Device Powerdown and Reset Status */ #define CP14_DBGDSCRint(rr) CP14_DBGDSCRext_V6(rr) /* Debug Status and Ctrl internal view */ -#endif - /* * CP15 C0 registers */ #define CP15_MIDR(rr) p15, 0, rr, c0, c0, 0 /* Main ID Register */ #define CP15_CTR(rr) p15, 0, rr, c0, c0, 1 /* Cache Type Register */ #define CP15_TCMTR(rr) p15, 0, rr, c0, c0, 2 /* TCM Type Register */ #define CP15_TLBTR(rr) p15, 0, rr, c0, c0, 3 /* TLB Type Register */ #define CP15_MPIDR(rr) p15, 0, rr, c0, c0, 5 /* Multiprocessor Affinity Register */ #define CP15_REVIDR(rr) p15, 0, rr, c0, c0, 6 /* Revision ID Register */ #define CP15_ID_PFR0(rr) p15, 0, rr, c0, c1, 0 /* Processor Feature Register 0 */ #define CP15_ID_PFR1(rr) p15, 0, rr, c0, c1, 1 /* Processor Feature Register 1 */ #define CP15_ID_DFR0(rr) p15, 0, rr, c0, c1, 2 /* Debug Feature Register 0 */ #define CP15_ID_AFR0(rr) p15, 0, rr, c0, c1, 3 /* Auxiliary Feature Register 0 */ #define CP15_ID_MMFR0(rr) p15, 0, rr, c0, c1, 4 /* Memory Model Feature Register 0 */ #define CP15_ID_MMFR1(rr) p15, 0, rr, c0, c1, 5 /* Memory Model Feature Register 1 */ #define CP15_ID_MMFR2(rr) p15, 0, rr, c0, c1, 6 /* Memory Model Feature Register 2 */ #define CP15_ID_MMFR3(rr) p15, 0, rr, c0, c1, 7 /* Memory Model Feature Register 3 */ #define CP15_ID_ISAR0(rr) p15, 0, rr, c0, c2, 0 /* Instruction Set Attribute Register 0 */ #define CP15_ID_ISAR1(rr) p15, 0, rr, c0, c2, 1 /* Instruction Set Attribute Register 1 */ #define CP15_ID_ISAR2(rr) p15, 0, rr, c0, c2, 2 /* Instruction Set Attribute Register 2 */ #define CP15_ID_ISAR3(rr) p15, 0, rr, c0, c2, 3 /* Instruction Set Attribute Register 3 */ #define CP15_ID_ISAR4(rr) p15, 0, rr, c0, c2, 4 /* Instruction Set Attribute Register 4 */ #define CP15_ID_ISAR5(rr) p15, 0, rr, c0, c2, 5 /* Instruction Set Attribute Register 5 */ #define CP15_CCSIDR(rr) p15, 1, rr, c0, c0, 0 /* Cache Size ID Registers */ #define CP15_CLIDR(rr) p15, 1, rr, c0, c0, 1 /* Cache Level ID Register */ #define CP15_AIDR(rr) p15, 1, rr, c0, c0, 7 /* Auxiliary ID Register */ #define CP15_CSSELR(rr) p15, 2, rr, c0, c0, 0 /* Cache Size Selection Register */ #define CP15_VPIDR(rr) p15, 4, rr, c0, c0, 0 /* Virtualization Processor ID Register */ #define CP15_VMPIDR(rr) p15, 4, rr, c0, c0, 5 /* Virtualization Multiprocessor ID Register */ /* * CP15 C1 registers */ #define CP15_SCTLR(rr) p15, 0, rr, c1, c0, 0 /* System Control Register */ #define CP15_ACTLR(rr) p15, 0, rr, c1, c0, 1 /* IMPLEMENTATION DEFINED Auxiliary Control Register */ #define CP15_CPACR(rr) p15, 0, rr, c1, c0, 2 /* Coprocessor Access Control Register */ #define CP15_SCR(rr) p15, 0, rr, c1, c1, 0 /* Secure Configuration Register */ #define CP15_SDER(rr) p15, 0, rr, c1, c1, 1 /* Secure Debug Enable Register */ #define CP15_NSACR(rr) p15, 0, rr, c1, c1, 2 /* Non-Secure Access Control Register */ #define 
CP15_HSCTLR(rr) p15, 4, rr, c1, c0, 0 /* Hyp System Control Register */ #define CP15_HCR(rr) p15, 4, rr, c1, c1, 0 /* Hyp Configuration Register */ #define CP15_HDCR(rr) p15, 4, rr, c1, c1, 1 /* Hyp Debug Configuration Register */ #define CP15_HCPTR(rr) p15, 4, rr, c1, c1, 2 /* Hyp Coprocessor Trap Register */ #define CP15_HSTR(rr) p15, 4, rr, c1, c1, 3 /* Hyp System Trap Register */ /* * CP15 C2 registers */ #define CP15_TTBR0(rr) p15, 0, rr, c2, c0, 0 /* Translation Table Base Register 0 */ #define CP15_TTBR1(rr) p15, 0, rr, c2, c0, 1 /* Translation Table Base Register 1 */ #define CP15_TTBCR(rr) p15, 0, rr, c2, c0, 2 /* Translation Table Base Control Register */ #define CP15_HTCR(rr) p15, 4, rr, c2, c0, 2 /* Hyp Translation Control Register */ #define CP15_VTCR(rr) p15, 4, rr, c2, c1, 2 /* Virtualization Translation Control Register */ /* * CP15 C3 registers */ #define CP15_DACR(rr) p15, 0, rr, c3, c0, 0 /* Domain Access Control Register */ /* * CP15 C5 registers */ #define CP15_DFSR(rr) p15, 0, rr, c5, c0, 0 /* Data Fault Status Register */ #define CP15_HSR(rr) p15, 4, rr, c5, c2, 0 /* Hyp Syndrome Register */ -#if __ARM_ARCH >= 6 /* From ARMv6: */ #define CP15_IFSR(rr) p15, 0, rr, c5, c0, 1 /* Instruction Fault Status Register */ -#endif #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_ADFSR(rr) p15, 0, rr, c5, c1, 0 /* Auxiliary Data Fault Status Register */ #define CP15_AIFSR(rr) p15, 0, rr, c5, c1, 1 /* Auxiliary Instruction Fault Status Register */ #endif /* * CP15 C6 registers */ #define CP15_DFAR(rr) p15, 0, rr, c6, c0, 0 /* Data Fault Address Register */ #define CP15_HDFAR(rr) p15, 4, rr, c6, c0, 0 /* Hyp Data Fault Address Register */ #define CP15_HIFAR(rr) p15, 4, rr, c6, c0, 2 /* Hyp Instruction Fault Address Register */ #define CP15_HPFAR(rr) p15, 4, rr, c6, c0, 4 /* Hyp IPA Fault Address Register */ -#if __ARM_ARCH >= 6 /* From ARMv6k: */ #define CP15_IFAR(rr) p15, 0, rr, c6, c0, 2 /* Instruction Fault Address Register */ -#endif /* * CP15 C7 registers */ #if __ARM_ARCH >= 7 && defined(SMP) /* From ARMv7: */ #define CP15_ICIALLUIS p15, 0, r0, c7, c1, 0 /* Instruction cache invalidate all PoU, IS */ #define CP15_BPIALLIS p15, 0, r0, c7, c1, 6 /* Branch predictor invalidate all IS */ #endif #define CP15_PAR(rr) p15, 0, rr, c7, c4, 0 /* Physical Address Register */ #define CP15_ICIALLU p15, 0, r0, c7, c5, 0 /* Instruction cache invalidate all PoU */ #define CP15_ICIMVAU(rr) p15, 0, rr, c7, c5, 1 /* Instruction cache invalidate */ #if __ARM_ARCH == 6 /* Deprecated in ARMv7 */ #define CP15_CP15ISB p15, 0, r0, c7, c5, 4 /* ISB */ #endif #define CP15_BPIALL p15, 0, r0, c7, c5, 6 /* Branch predictor invalidate all */ #define CP15_BPIMVA p15, 0, rr, c7, c5, 7 /* Branch predictor invalidate by MVA */ #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCIALL p15, 0, r0, c7, c6, 0 /* Data cache invalidate all */ #endif #define CP15_DCIMVAC(rr) p15, 0, rr, c7, c6, 1 /* Data cache invalidate by MVA PoC */ #define CP15_DCISW(rr) p15, 0, rr, c7, c6, 2 /* Data cache invalidate by set/way */ #define CP15_ATS1CPR(rr) p15, 0, rr, c7, c8, 0 /* Stage 1 Current state PL1 read */ #define CP15_ATS1CPW(rr) p15, 0, rr, c7, c8, 1 /* Stage 1 Current state PL1 write */ #define CP15_ATS1CUR(rr) p15, 0, rr, c7, c8, 2 /* Stage 1 Current state unprivileged read */ #define CP15_ATS1CUW(rr) p15, 0, rr, c7, c8, 3 /* Stage 1 Current state unprivileged write */ #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_ATS12NSOPR(rr) p15, 0, rr, c7, c8, 4 /* Stages 1 and 2 Non-secure only PL1 read */ #define 
CP15_ATS12NSOPW(rr) p15, 0, rr, c7, c8, 5 /* Stages 1 and 2 Non-secure only PL1 write */ #define CP15_ATS12NSOUR(rr) p15, 0, rr, c7, c8, 6 /* Stages 1 and 2 Non-secure only unprivileged read */ #define CP15_ATS12NSOUW(rr) p15, 0, rr, c7, c8, 7 /* Stages 1 and 2 Non-secure only unprivileged write */ #endif #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCCALL p15, 0, r0, c7, c10, 0 /* Data cache clean all */ #endif #define CP15_DCCMVAC(rr) p15, 0, rr, c7, c10, 1 /* Data cache clean by MVA PoC */ #define CP15_DCCSW(rr) p15, 0, rr, c7, c10, 2 /* Data cache clean by set/way */ #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_CP15DSB p15, 0, r0, c7, c10, 4 /* DSB */ #define CP15_CP15DMB p15, 0, r0, c7, c10, 5 /* DMB */ #define CP15_CP15WFI p15, 0, r0, c7, c0, 4 /* WFI */ #endif #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_DCCMVAU(rr) p15, 0, rr, c7, c11, 1 /* Data cache clean by MVA PoU */ #endif #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCCIALL p15, 0, r0, c7, c14, 0 /* Data cache clean and invalidate all */ #endif #define CP15_DCCIMVAC(rr) p15, 0, rr, c7, c14, 1 /* Data cache clean and invalidate by MVA PoC */ #define CP15_DCCISW(rr) p15, 0, rr, c7, c14, 2 /* Data cache clean and invalidate by set/way */ /* * CP15 C8 registers */ #if __ARM_ARCH >= 7 && defined(SMP) /* From ARMv7: */ #define CP15_TLBIALLIS p15, 0, r0, c8, c3, 0 /* Invalidate entire unified TLB IS */ #define CP15_TLBIMVAIS(rr) p15, 0, rr, c8, c3, 1 /* Invalidate unified TLB by MVA IS */ #define CP15_TLBIASIDIS(rr) p15, 0, rr, c8, c3, 2 /* Invalidate unified TLB by ASID IS */ #define CP15_TLBIMVAAIS(rr) p15, 0, rr, c8, c3, 3 /* Invalidate unified TLB by MVA, all ASID IS */ #endif #define CP15_TLBIALL p15, 0, r0, c8, c7, 0 /* Invalidate entire unified TLB */ #define CP15_TLBIMVA(rr) p15, 0, rr, c8, c7, 1 /* Invalidate unified TLB by MVA */ #define CP15_TLBIASID(rr) p15, 0, rr, c8, c7, 2 /* Invalidate unified TLB by ASID */ #define CP15_TLBIALLH(rr) p15, 4, rr, c8, c7, 0 /* Invalidate Entire Hyp Unified TLB */ -#if __ARM_ARCH >= 6 /* From ARMv6: */ #define CP15_TLBIMVAA(rr) p15, 0, rr, c8, c7, 3 /* Invalidate unified TLB by MVA, all ASID */ -#endif /* * CP15 C9 registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) #define CP15_PMUSERENR(rr) p15, 0, rr, c15, c9, 0 /* Access Validation Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c15, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c15, c12, 1 /* PM Cycle Count Register */ -#elif __ARM_ARCH > 6 +#else #define CP15_L2CTLR(rr) p15, 1, rr, c9, c0, 2 /* L2 Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c9, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCNTENSET(rr) p15, 0, rr, c9, c12, 1 /* PM Count Enable Set Register */ #define CP15_PMCNTENCLR(rr) p15, 0, rr, c9, c12, 2 /* PM Count Enable Clear Register */ #define CP15_PMOVSR(rr) p15, 0, rr, c9, c12, 3 /* PM Overflow Flag Status Register */ #define CP15_PMSWINC(rr) p15, 0, rr, c9, c12, 4 /* PM Software Increment Register */ #define CP15_PMSELR(rr) p15, 0, rr, c9, c12, 5 /* PM Event Counter Selection Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c9, c13, 0 /* PM Cycle Count Register */ #define CP15_PMXEVTYPER(rr) p15, 0, rr, c9, c13, 1 /* PM Event Type Select Register */ #define CP15_PMXEVCNTRR(rr) p15, 0, rr, c9, c13, 2 /* PM Event Count Register */ #define CP15_PMUSERENR(rr) p15, 0, rr, c9, c14, 0 /* PM User Enable Register */ #define CP15_PMINTENSET(rr) p15, 0, rr, c9, c14, 1 /* PM Interrupt Enable Set Register */ #define 
CP15_PMINTENCLR(rr) p15, 0, rr, c9, c14, 2 /* PM Interrupt Enable Clear Register */ #endif /* * CP15 C10 registers */ /* Without LPAE this is PRRR, with LPAE it's MAIR0 */ #define CP15_PRRR(rr) p15, 0, rr, c10, c2, 0 /* Primary Region Remap Register */ #define CP15_MAIR0(rr) p15, 0, rr, c10, c2, 0 /* Memory Attribute Indirection Register 0 */ /* Without LPAE this is NMRR, with LPAE it's MAIR1 */ #define CP15_NMRR(rr) p15, 0, rr, c10, c2, 1 /* Normal Memory Remap Register */ #define CP15_MAIR1(rr) p15, 0, rr, c10, c2, 1 /* Memory Attribute Indirection Register 1 */ #define CP15_AMAIR0(rr) p15, 0, rr, c10, c3, 0 /* Auxiliary Memory Attribute Indirection Register 0 */ #define CP15_AMAIR1(rr) p15, 0, rr, c10, c3, 1 /* Auxiliary Memory Attribute Indirection Register 1 */ #define CP15_HMAIR0(rr) p15, 4, rr, c10, c2, 0 /* Hyp Memory Attribute Indirection Register 0 */ #define CP15_HMAIR1(rr) p15, 4, rr, c10, c2, 1 /* Hyp Memory Attribute Indirection Register 1 */ /* * CP15 C12 registers */ #define CP15_VBAR(rr) p15, 0, rr, c12, c0, 0 /* Vector Base Address Register */ #define CP15_MVBAR(rr) p15, 0, rr, c12, c0, 1 /* Monitor Vector Base Address Register */ #define CP15_ISR(rr) p15, 0, rr, c12, c1, 0 /* Interrupt Status Register */ #define CP15_HVBAR(rr) p15, 4, rr, c12, c0, 0 /* Hyp Vector Base Address Register*/ /* * CP15 C13 registers */ #define CP15_FCSEIDR(rr) p15, 0, rr, c13, c0, 0 /* FCSE Process ID Register */ #define CP15_CONTEXTIDR(rr) p15, 0, rr, c13, c0, 1 /* Context ID Register */ #define CP15_TPIDRURW(rr) p15, 0, rr, c13, c0, 2 /* User Read/Write Thread ID Register */ #define CP15_TPIDRURO(rr) p15, 0, rr, c13, c0, 3 /* User Read-Only Thread ID Register */ #define CP15_TPIDRPRW(rr) p15, 0, rr, c13, c0, 4 /* PL1 only Thread ID Register */ #define CP15_HTPIDR(rr) p15, 4, rr, c13, c0, 2 /* Hyp Software Thread ID Register */ /* * CP15 C14 registers * These are the Generic Timer registers and may be unallocated on some SoCs. * Only use these when you know the Generic Timer is available. 
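As elsewhere in this header, these macros only spell out the coprocessor operand list; machine/cpu-v6.h turns such lists into C accessors by expanding the macro and stringifying the result into an mrc/mcr instruction. A minimal sketch of that generator, using the _FX/_RF0 helper names that cpu-v6.h itself uses (two macro levels are needed so the operand list expands before the # stringification):

	#define	_FX(s...)	#s
	#define	_RF0(fname, aname...)					\
	static __inline uint32_t fname(void)				\
	{								\
		uint32_t reg;						\
		__asm __volatile("mrc\t" _FX(aname) : "=r" (reg));	\
		return (reg);						\
	}

	_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))

With the definitions below, cp15_cntfrq_get() compiles to "mrc p15, 0, Rt, c14, c0, 0" and returns the Generic Timer counter frequency.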
*/ #define CP15_CNTFRQ(rr) p15, 0, rr, c14, c0, 0 /* Counter Frequency Register */ #define CP15_CNTKCTL(rr) p15, 0, rr, c14, c1, 0 /* Timer PL1 Control Register */ #define CP15_CNTP_TVAL(rr) p15, 0, rr, c14, c2, 0 /* PL1 Physical Timer Value Register */ #define CP15_CNTP_CTL(rr) p15, 0, rr, c14, c2, 1 /* PL1 Physical Timer Control Register */ #define CP15_CNTV_TVAL(rr) p15, 0, rr, c14, c3, 0 /* Virtual Timer Value Register */ #define CP15_CNTV_CTL(rr) p15, 0, rr, c14, c3, 1 /* Virtual Timer Control Register */ #define CP15_CNTHCTL(rr) p15, 4, rr, c14, c1, 0 /* Timer PL2 Control Register */ #define CP15_CNTHP_TVAL(rr) p15, 4, rr, c14, c2, 0 /* PL2 Physical Timer Value Register */ #define CP15_CNTHP_CTL(rr) p15, 4, rr, c14, c2, 1 /* PL2 Physical Timer Control Register */ /* 64-bit registers for use with mcrr/mrrc */ #define CP15_CNTPCT(rq, rr) p15, 0, rq, rr, c14 /* Physical Count Register */ #define CP15_CNTVCT(rq, rr) p15, 1, rq, rr, c14 /* Virtual Count Register */ #define CP15_CNTP_CVAL(rq, rr) p15, 2, rq, rr, c14 /* PL1 Physical Timer Compare Value Register */ #define CP15_CNTV_CVAL(rq, rr) p15, 3, rq, rr, c14 /* Virtual Timer Compare Value Register */ #define CP15_CNTVOFF(rq, rr) p15, 4, rq, rr, c14 /* Virtual Offset Register */ #define CP15_CNTHP_CVAL(rq, rr) p15, 6, rq, rr, c14 /* PL2 Physical Timer Compare Value Register */ #define CP15_VTTBR(rq, rr) p15, 6, rq, rr, c2 /* Virtualization Translation Table Base Register */ #define CP15_HTTBR(rq, rr) p15, 4, rq, rr, c2 /* Hyp Translation Table Base Register */ #define CP15_TTBR0_2(rq, rr) p15, 0, rq, rr, c2 /* Translation Table Base Register 0 */ #define CP15_TTBR1_2(rq, rr) p15, 1, rq, rr, c2 /* Translation Table Base Register 1 */ #define CP15_PAR_2(rq, rr) p15, 0, rq, rr, c7 /* Physical Address Register */ /* * CP15 C15 registers */ #define CP15_CBAR(rr) p15, 4, rr, c15, c0, 0 /* Configuration Base Address Register */ #endif /* !MACHINE_SYSREG_H */ Index: head/sys/arm/include/vm.h =================================================================== --- head/sys/arm/include/vm.h (revision 368140) +++ head/sys/arm/include/vm.h (revision 368141) @@ -1,54 +1,48 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alan L. Cox * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_VM_H_ #define _MACHINE_VM_H_ -#if __ARM_ARCH >= 6 #define VM_MEMATTR_WB_WA ((vm_memattr_t)0) #define VM_MEMATTR_NOCACHE ((vm_memattr_t)1) #define VM_MEMATTR_DEVICE ((vm_memattr_t)2) #define VM_MEMATTR_SO ((vm_memattr_t)3) #define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)4) #define VM_MEMATTR_DEFAULT VM_MEMATTR_WB_WA #define VM_MEMATTR_UNCACHEABLE VM_MEMATTR_SO /* misused by DMA */ #ifdef _KERNEL /* Don't export aliased VM_MEMATTR to userland */ #define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WRITE_THROUGH /* for DRM */ #define VM_MEMATTR_WRITE_BACK VM_MEMATTR_WB_WA /* for DRM */ -#endif -#else -/* Memory attribute configuration. */ -#define VM_MEMATTR_DEFAULT 0 -#define VM_MEMATTR_UNCACHEABLE 1 #endif #endif /* !_MACHINE_VM_H_ */ Index: head/sys/arm/mv/gpio.c =================================================================== --- head/sys/arm/mv/gpio.c (revision 368140) +++ head/sys/arm/mv/gpio.c (revision 368141) @@ -1,1215 +1,1213 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Benno Rice. * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2017 Semihalf. * All rights reserved. * * Adapted and extended for Marvell SoCs by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_gpio.c, rev 1 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #define GPIO_MAX_INTR_COUNT 8 #define GPIO_PINS_PER_REG 32 #define GPIO_GENERIC_CAP (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_OPENDRAIN | GPIO_PIN_PUSHPULL | \ GPIO_PIN_TRISTATE | GPIO_PIN_PULLUP | \ GPIO_PIN_PULLDOWN | GPIO_PIN_INVIN | \ GPIO_PIN_INVOUT) #define DEBOUNCE_CHECK_MS 1 #define DEBOUNCE_LO_HI_MS 2 #define DEBOUNCE_HI_LO_MS 2 #define DEBOUNCE_CHECK_TICKS ((hz / 1000) * DEBOUNCE_CHECK_MS) struct mv_gpio_softc { device_t dev; device_t sc_busdev; struct resource * mem_res; int mem_rid; struct resource * irq_res[GPIO_MAX_INTR_COUNT]; int irq_rid[GPIO_MAX_INTR_COUNT]; struct intr_event * gpio_events[MV_GPIO_MAX_NPINS]; void *ih_cookie[GPIO_MAX_INTR_COUNT]; bus_space_tag_t bst; bus_space_handle_t bsh; uint32_t offset; struct mtx mutex; uint8_t pin_num; /* number of GPIO pins */ uint8_t irq_num; /* number of real IRQs occupied by GPIO controller */ struct gpio_pin gpio_setup[MV_GPIO_MAX_NPINS]; /* Used for debouncing. */ uint32_t debounced_state_lo; uint32_t debounced_state_hi; struct callout **debounce_callouts; int *debounce_counters; }; struct mv_gpio_pindev { device_t dev; int pin; }; static int mv_gpio_probe(device_t); static int mv_gpio_attach(device_t); static int mv_gpio_intr(device_t, void *); static void mv_gpio_double_edge_init(device_t, int); static int mv_gpio_debounce_setup(device_t, int); static int mv_gpio_debounce_prepare(device_t, int); static int mv_gpio_debounce_init(device_t, int); static void mv_gpio_debounce_start(device_t, int); static void mv_gpio_debounce(void *); static void mv_gpio_debounced_state_set(device_t, int, uint8_t); static uint32_t mv_gpio_debounced_state_get(device_t, int); static void mv_gpio_exec_intr_handlers(device_t, uint32_t, int); static void mv_gpio_intr_handler(device_t, int); static uint32_t mv_gpio_reg_read(device_t, uint32_t); static void mv_gpio_reg_write(device_t, uint32_t, uint32_t); static void mv_gpio_reg_set(device_t, uint32_t, uint32_t); static void mv_gpio_reg_clear(device_t, uint32_t, uint32_t); static void mv_gpio_blink(device_t, uint32_t, uint8_t); static void mv_gpio_polarity(device_t, uint32_t, uint8_t, uint8_t); static void mv_gpio_level(device_t, uint32_t, uint8_t); static void mv_gpio_edge(device_t, uint32_t, uint8_t); static void mv_gpio_out_en(device_t, uint32_t, uint8_t); static void mv_gpio_int_ack(struct mv_gpio_pindev *); static void mv_gpio_value_set(device_t, uint32_t, uint8_t); static uint32_t mv_gpio_value_get(device_t, uint32_t, uint8_t); static void mv_gpio_intr_mask(struct mv_gpio_pindev *); static void mv_gpio_intr_unmask(struct mv_gpio_pindev *); void mv_gpio_finish_intrhandler(struct mv_gpio_pindev *); int mv_gpio_setup_intrhandler(device_t, const char *, driver_filter_t *, void (*)(void *), void *, int, int, void **); int mv_gpio_configure(device_t, uint32_t, uint32_t, uint32_t); void mv_gpio_out(device_t, uint32_t, uint8_t, uint8_t); uint8_t mv_gpio_in(device_t, uint32_t); /* * GPIO interface */ static device_t mv_gpio_get_bus(device_t); static int mv_gpio_pin_max(device_t, int *); static int mv_gpio_pin_getcaps(device_t, uint32_t, uint32_t *); static int mv_gpio_pin_getflags(device_t, uint32_t, uint32_t *); static int mv_gpio_pin_getname(device_t, uint32_t, char *); 
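The DEBOUNCE_*_MS constants above are in milliseconds; DEBOUNCE_CHECK_TICKS converts them into callout ticks. A quick worked example of that arithmetic, assuming the stock kernel tick rate:

/*
 * With hz = 1000 (the FreeBSD default), DEBOUNCE_CHECK_TICKS is
 * (1000 / 1000) * 1 = 1 tick, so the debounce machinery resamples the
 * pin roughly every millisecond.  The integer division matters: with
 * hz = 100 the expression evaluates to 0 ticks, which callout_reset()
 * rounds up to one tick (10 ms at that rate).
 */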
static int mv_gpio_pin_setflags(device_t, uint32_t, uint32_t); static int mv_gpio_pin_set(device_t, uint32_t, unsigned int); static int mv_gpio_pin_get(device_t, uint32_t, unsigned int *); static int mv_gpio_pin_toggle(device_t, uint32_t); static int mv_gpio_map_gpios(device_t, phandle_t, phandle_t, int, pcell_t *, uint32_t *, uint32_t *); #define MV_GPIO_LOCK() mtx_lock_spin(&sc->mutex) #define MV_GPIO_UNLOCK() mtx_unlock_spin(&sc->mutex) #define MV_GPIO_ASSERT_LOCKED() mtx_assert(&sc->mutex, MA_OWNED) static device_method_t mv_gpio_methods[] = { DEVMETHOD(device_probe, mv_gpio_probe), DEVMETHOD(device_attach, mv_gpio_attach), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, mv_gpio_get_bus), DEVMETHOD(gpio_pin_max, mv_gpio_pin_max), DEVMETHOD(gpio_pin_getname, mv_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, mv_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, mv_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, mv_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, mv_gpio_pin_get), DEVMETHOD(gpio_pin_set, mv_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, mv_gpio_pin_toggle), DEVMETHOD(gpio_map_gpios, mv_gpio_map_gpios), DEVMETHOD_END }; static driver_t mv_gpio_driver = { "gpio", mv_gpio_methods, sizeof(struct mv_gpio_softc), }; static devclass_t mv_gpio_devclass; EARLY_DRIVER_MODULE(mv_gpio, simplebus, mv_gpio_driver, mv_gpio_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST); struct ofw_compat_data compat_data[] = { { "mrvl,gpio", 1 }, { "marvell,orion-gpio", 1 }, { NULL, 0 } }; static int mv_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Marvell Integrated GPIO Controller"); return (0); } static int mv_gpio_setup_interrupts(struct mv_gpio_softc *sc, phandle_t node) { phandle_t iparent; pcell_t irq_cells; int i, size; /* Find root interrupt controller */ iparent = ofw_bus_find_iparent(node); if (iparent == 0) { device_printf(sc->dev, "No interrupt-parent found. 
" "Error in DTB\n"); return (ENXIO); } else { /* While at parent - store interrupt cells prop */ if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &irq_cells, sizeof(irq_cells)) == -1) { device_printf(sc->dev, "DTB: Missing #interrupt-cells " "property in interrupt parent node\n"); return (ENXIO); } } size = OF_getproplen(node, "interrupts"); if (size != -1) { size = size / sizeof(pcell_t); size = size / irq_cells; sc->irq_num = size; device_printf(sc->dev, "%d IRQs available\n", sc->irq_num); } else { device_printf(sc->dev, "ERROR: no interrupts entry found!\n"); return (ENXIO); } for (i = 0; i < sc->irq_num; i++) { sc->irq_rid[i] = i; sc->irq_res[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid[i], RF_ACTIVE); if (!sc->irq_res[i]) { mtx_destroy(&sc->mutex); device_printf(sc->dev, "could not allocate gpio%d interrupt\n", i+1); return (ENXIO); } } device_printf(sc->dev, "Disable interrupts (offset = %x + EDGE(0x18)\n", sc->offset); /* Disable all interrupts */ bus_space_write_4(sc->bst, sc->bsh, sc->offset + GPIO_INT_EDGE_MASK, 0); device_printf(sc->dev, "Disable interrupts (offset = %x + LEV(0x1C))\n", sc->offset); bus_space_write_4(sc->bst, sc->bsh, sc->offset + GPIO_INT_LEV_MASK, 0); for (i = 0; i < sc->irq_num; i++) { device_printf(sc->dev, "Setup intr %d\n", i); if (bus_setup_intr(sc->dev, sc->irq_res[i], INTR_TYPE_MISC, (driver_filter_t *)mv_gpio_intr, NULL, sc, &sc->ih_cookie[i]) != 0) { mtx_destroy(&sc->mutex); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i], sc->irq_res[i]); device_printf(sc->dev, "could not set up intr %d\n", i); return (ENXIO); } } /* Clear interrupt status. */ device_printf(sc->dev, "Clear int status (offset = %x)\n", sc->offset); bus_space_write_4(sc->bst, sc->bsh, sc->offset + GPIO_INT_CAUSE, 0); sc->debounce_callouts = (struct callout **)malloc(sc->pin_num * sizeof(struct callout *), M_DEVBUF, M_WAITOK | M_ZERO); if (sc->debounce_callouts == NULL) return (ENOMEM); sc->debounce_counters = (int *)malloc(sc->pin_num * sizeof(int), M_DEVBUF, M_WAITOK); if (sc->debounce_counters == NULL) return (ENOMEM); return (0); } static int mv_gpio_attach(device_t dev) { int i, rv; struct mv_gpio_softc *sc; phandle_t node; pcell_t pincnt = 0; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (sc == NULL) return (ENXIO); node = ofw_bus_get_node(dev); sc->dev = dev; if (OF_getencprop(node, "pin-count", &pincnt, sizeof(pcell_t)) >= 0 || OF_getencprop(node, "ngpios", &pincnt, sizeof(pcell_t)) >= 0) { sc->pin_num = MIN(pincnt, MV_GPIO_MAX_NPINS); if (bootverbose) device_printf(dev, "%d pins available\n", sc->pin_num); } else { device_printf(dev, "ERROR: no pin-count or ngpios entry found!\n"); return (ENXIO); } if (OF_getencprop(node, "offset", &sc->offset, sizeof(sc->offset)) == -1) sc->offset = 0; /* Assign generic capabilities to every gpio pin */ for(i = 0; i < sc->pin_num; i++) sc->gpio_setup[i].gp_caps = GPIO_GENERIC_CAP; mtx_init(&sc->mutex, device_get_nameunit(dev), NULL, MTX_SPIN); sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE ); if (!sc->mem_res) { mtx_destroy(&sc->mutex); device_printf(dev, "could not allocate memory window\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); rv = mv_gpio_setup_interrupts(sc, node); if (rv != 0) return (rv); sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) { mtx_destroy(&sc->mutex); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[i], 
sc->irq_res[i]); return (ENXIO); } return (0); } static int mv_gpio_intr(device_t dev, void *arg) { uint32_t int_cause, gpio_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_LOCK(); /* * According to documentation, edge sensitive interrupts are asserted * when unmasked GPIO_INT_CAUSE register bits are set. */ int_cause = mv_gpio_reg_read(dev, GPIO_INT_CAUSE); int_cause &= mv_gpio_reg_read(dev, GPIO_INT_EDGE_MASK); /* * Level sensitive interrupts are asserted when unmasked GPIO_DATA_IN * register bits are set. */ gpio_val = mv_gpio_reg_read(dev, GPIO_DATA_IN); gpio_val &= mv_gpio_reg_read(dev, GPIO_INT_LEV_MASK); mv_gpio_exec_intr_handlers(dev, int_cause | gpio_val, 0); MV_GPIO_UNLOCK(); return (FILTER_HANDLED); } /* * GPIO interrupt handling */ void mv_gpio_finish_intrhandler(struct mv_gpio_pindev *s) { /* When we achieve full interrupt support * this function will be the opposite of * mv_gpio_setup_intrhandler */ /* For now it exists only as a reminder that * there should be a place to free the mv_gpio_pindev * allocated by mv_gpio_setup_intrhandler */ free(s, M_DEVBUF); } int mv_gpio_setup_intrhandler(device_t dev, const char *name, driver_filter_t *filt, void (*hand)(void *), void *arg, int pin, int flags, void **cookiep) { struct intr_event *event; int error; struct mv_gpio_pindev *s; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); s = malloc(sizeof(struct mv_gpio_pindev), M_DEVBUF, M_NOWAIT | M_ZERO); if (pin < 0 || pin >= sc->pin_num) return (ENXIO); event = sc->gpio_events[pin]; if (event == NULL) { MV_GPIO_LOCK(); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) { error = mv_gpio_debounce_init(dev, pin); if (error != 0) { MV_GPIO_UNLOCK(); return (error); } } else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) mv_gpio_double_edge_init(dev, pin); MV_GPIO_UNLOCK(); error = intr_event_create(&event, (void *)s, 0, pin, (void (*)(void *))mv_gpio_intr_mask, (void (*)(void *))mv_gpio_intr_unmask, (void (*)(void *))mv_gpio_int_ack, NULL, "gpio%d:", pin); if (error != 0) return (error); sc->gpio_events[pin] = event; } intr_event_add_handler(event, name, filt, hand, arg, intr_priority(flags), flags, cookiep); return (0); } static void mv_gpio_intr_mask(struct mv_gpio_pindev *s) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); if (s->pin >= sc->pin_num) return; MV_GPIO_LOCK(); if (sc->gpio_setup[s->pin].gp_flags & (MV_GPIO_IN_IRQ_EDGE | MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_edge(s->dev, s->pin, 0); else mv_gpio_level(s->dev, s->pin, 0); /* * The interrupt has to be acknowledged before scheduling an interrupt * thread. This way we allow the interrupt source to trigger again * (which can happen with shared IRQs e.g. PCI) while processing the * current event. */ mv_gpio_int_ack(s); MV_GPIO_UNLOCK(); return; } static void mv_gpio_intr_unmask(struct mv_gpio_pindev *s) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); if (s->pin >= sc->pin_num) return; MV_GPIO_LOCK(); if (sc->gpio_setup[s->pin].gp_flags & (MV_GPIO_IN_IRQ_EDGE | MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_edge(s->dev, s->pin, 1); else mv_gpio_level(s->dev, s->pin, 1); MV_GPIO_UNLOCK(); return; } static void mv_gpio_exec_intr_handlers(device_t dev, uint32_t status, int high) { int i, pin; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); i = 0; while (status != 0) { if (status & 1) { pin = (high ? 
(i + GPIO_PINS_PER_REG) : i); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) mv_gpio_debounce_start(dev, pin); else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) { mv_gpio_polarity(dev, pin, 0, 1); mv_gpio_intr_handler(dev, pin); } else mv_gpio_intr_handler(dev, pin); } status >>= 1; i++; } } static void mv_gpio_intr_handler(device_t dev, int pin) { -#ifdef INTRNG struct intr_irqsrc isrc; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); #ifdef INTR_SOLO isrc.isrc_filter = NULL; #endif isrc.isrc_event = sc->gpio_events[pin]; if (isrc.isrc_event == NULL || CK_SLIST_EMPTY(&isrc.isrc_event->ie_handlers)) return; intr_isrc_dispatch(&isrc, NULL); -#endif } int mv_gpio_configure(device_t dev, uint32_t pin, uint32_t flags, uint32_t mask) { int error; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); error = 0; if (pin >= sc->pin_num) return (EINVAL); /* check flags consistency */ if (((flags & mask) & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) return (EINVAL); if (mask & MV_GPIO_IN_DEBOUNCE) { if (sc->irq_num == 0) return (EINVAL); error = mv_gpio_debounce_prepare(dev, pin); if (error != 0) return (error); } MV_GPIO_LOCK(); if ((mask & flags) & GPIO_PIN_INPUT) mv_gpio_out_en(dev, pin, 0); if ((mask & flags) & GPIO_PIN_OUTPUT) { if ((flags & mask) & GPIO_PIN_OPENDRAIN) mv_gpio_value_set(dev, pin, 0); else mv_gpio_value_set(dev, pin, 1); mv_gpio_out_en(dev, pin, 1); } if (mask & MV_GPIO_OUT_BLINK) mv_gpio_blink(dev, pin, flags & MV_GPIO_OUT_BLINK); if (mask & MV_GPIO_IN_POL_LOW) mv_gpio_polarity(dev, pin, flags & MV_GPIO_IN_POL_LOW, 0); if (mask & MV_GPIO_IN_DEBOUNCE) { error = mv_gpio_debounce_setup(dev, pin); if (error) { MV_GPIO_UNLOCK(); return (error); } } sc->gpio_setup[pin].gp_flags &= ~(mask); sc->gpio_setup[pin].gp_flags |= (flags & mask); MV_GPIO_UNLOCK(); return (0); } static void mv_gpio_double_edge_init(device_t dev, int pin) { uint8_t raw_read; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); raw_read = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0); if (raw_read) mv_gpio_polarity(dev, pin, 1, 0); else mv_gpio_polarity(dev, pin, 0, 0); } static int mv_gpio_debounce_setup(device_t dev, int pin) { struct callout *c; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); c = sc->debounce_callouts[pin]; if (c == NULL) return (ENXIO); if (callout_active(c)) callout_deactivate(c); callout_stop(c); return (0); } static int mv_gpio_debounce_prepare(device_t dev, int pin) { struct callout *c; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); c = sc->debounce_callouts[pin]; if (c == NULL) { c = (struct callout *)malloc(sizeof(struct callout), M_DEVBUF, M_WAITOK); sc->debounce_callouts[pin] = c; if (c == NULL) return (ENOMEM); callout_init(c, 1); } return (0); } static int mv_gpio_debounce_init(device_t dev, int pin) { uint8_t raw_read; int *cnt; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); cnt = &sc->debounce_counters[pin]; raw_read = (mv_gpio_value_get(dev, pin, 1) ? 
1 : 0); if (raw_read) { mv_gpio_polarity(dev, pin, 1, 0); *cnt = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; } else { mv_gpio_polarity(dev, pin, 0, 0); *cnt = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; } mv_gpio_debounced_state_set(dev, pin, raw_read); return (0); } static void mv_gpio_debounce_start(device_t dev, int pin) { struct callout *c; struct mv_gpio_pindev s = {dev, pin}; struct mv_gpio_pindev *sd; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); c = sc->debounce_callouts[pin]; if (c == NULL) { mv_gpio_int_ack(&s); return; } if (callout_pending(c) || callout_active(c)) { mv_gpio_int_ack(&s); return; } sd = (struct mv_gpio_pindev *)malloc(sizeof(struct mv_gpio_pindev), M_DEVBUF, M_WAITOK); if (sd == NULL) { mv_gpio_int_ack(&s); return; } sd->pin = pin; sd->dev = dev; callout_reset(c, DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, sd); } static void mv_gpio_debounce(void *arg) { uint8_t raw_read, last_state; int pin; device_t dev; int *debounce_counter; struct mv_gpio_softc *sc; struct mv_gpio_pindev *s; s = (struct mv_gpio_pindev *)arg; dev = s->dev; pin = s->pin; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_LOCK(); raw_read = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0); last_state = (mv_gpio_debounced_state_get(dev, pin) ? 1 : 0); debounce_counter = &sc->debounce_counters[pin]; if (raw_read == last_state) { if (last_state) *debounce_counter = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; else *debounce_counter = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; callout_reset(sc->debounce_callouts[pin], DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg); } else { *debounce_counter = *debounce_counter - 1; if (*debounce_counter != 0) callout_reset(sc->debounce_callouts[pin], DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg); else { mv_gpio_debounced_state_set(dev, pin, raw_read); if (last_state) *debounce_counter = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; else *debounce_counter = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; if (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) && (raw_read == 0)) || (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) == 0) && raw_read) || (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_intr_handler(dev, pin); /* Toggle polarity for next edge. 
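The reload/decrement interplay above is what filters glitches: with DEBOUNCE_LO_HI_MS = 2 and DEBOUNCE_CHECK_MS = 1, the counter starts at 2, each 1 ms sample that differs from the debounced state decrements it, and any sample that matches the old state reloads it. Only two consecutive differing samples drive the counter to zero, flip the debounced state and invoke the handler; an isolated one-sample glitch decrements the counter once and is then discarded by the reload on the next matching sample.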
*/ mv_gpio_polarity(dev, pin, 0, 1); free(arg, M_DEVBUF); callout_deactivate(sc->debounce_callouts[pin]); } } MV_GPIO_UNLOCK(); } static void mv_gpio_debounced_state_set(device_t dev, int pin, uint8_t new_state) { uint32_t *old_state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= GPIO_PINS_PER_REG) { old_state = &sc->debounced_state_hi; pin -= GPIO_PINS_PER_REG; } else old_state = &sc->debounced_state_lo; if (new_state) *old_state |= (1 << pin); else *old_state &= ~(1 << pin); } static uint32_t mv_gpio_debounced_state_get(device_t dev, int pin) { uint32_t *state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= GPIO_PINS_PER_REG) { state = &sc->debounced_state_hi; pin -= GPIO_PINS_PER_REG; } else state = &sc->debounced_state_lo; return (*state & (1 << pin)); } void mv_gpio_out(device_t dev, uint32_t pin, uint8_t val, uint8_t enable) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_LOCK(); mv_gpio_value_set(dev, pin, val); mv_gpio_out_en(dev, pin, enable); MV_GPIO_UNLOCK(); } uint8_t mv_gpio_in(device_t dev, uint32_t pin) { uint8_t state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) { if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) state = (mv_gpio_debounced_state_get(dev, pin) ? 0 : 1); else state = (mv_gpio_debounced_state_get(dev, pin) ? 1 : 0); } else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) { if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) state = (mv_gpio_value_get(dev, pin, 1) ? 0 : 1); else state = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0); } else state = (mv_gpio_value_get(dev, pin, 0) ? 
1 : 0); return (state); } static uint32_t mv_gpio_reg_read(device_t dev, uint32_t reg) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); return (bus_space_read_4(sc->bst, sc->bsh, sc->offset + reg)); } static void mv_gpio_reg_write(device_t dev, uint32_t reg, uint32_t val) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); bus_space_write_4(sc->bst, sc->bsh, sc->offset + reg, val); } static void mv_gpio_reg_set(device_t dev, uint32_t reg, uint32_t pin) { uint32_t reg_val; reg_val = mv_gpio_reg_read(dev, reg); reg_val |= GPIO(pin); mv_gpio_reg_write(dev, reg, reg_val); } static void mv_gpio_reg_clear(device_t dev, uint32_t reg, uint32_t pin) { uint32_t reg_val; reg_val = mv_gpio_reg_read(dev, reg); reg_val &= ~(GPIO(pin)); mv_gpio_reg_write(dev, reg, reg_val); } static void mv_gpio_out_en(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_DATA_OUT_EN_CTRL; if (enable) mv_gpio_reg_clear(dev, reg, pin); else mv_gpio_reg_set(dev, reg, pin); } static void mv_gpio_blink(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_BLINK_EN; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_polarity(device_t dev, uint32_t pin, uint8_t enable, uint8_t toggle) { uint32_t reg, reg_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_DATA_IN_POLAR; if (toggle) { reg_val = mv_gpio_reg_read(dev, reg) & GPIO(pin); if (reg_val) mv_gpio_reg_clear(dev, reg, pin); else mv_gpio_reg_set(dev, reg, pin); } else if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_level(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_INT_LEV_MASK; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_edge(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_INT_EDGE_MASK; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_int_ack(struct mv_gpio_pindev *s) { uint32_t reg, pin; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); pin = s->pin; if (pin >= sc->pin_num) return; reg = GPIO_INT_CAUSE; mv_gpio_reg_clear(s->dev, reg, pin); } static uint32_t mv_gpio_value_get(device_t dev, uint32_t pin, uint8_t exclude_polar) { uint32_t reg, polar_reg, reg_val, polar_reg_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return (0); reg = GPIO_DATA_IN; polar_reg = GPIO_DATA_IN_POLAR; reg_val = mv_gpio_reg_read(dev, reg); if (exclude_polar) { polar_reg_val = mv_gpio_reg_read(dev, polar_reg); return ((reg_val & GPIO(pin)) ^ (polar_reg_val & GPIO(pin))); } else return (reg_val & GPIO(pin)); } static void mv_gpio_value_set(device_t dev, uint32_t pin, uint8_t val) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= sc->pin_num) return; reg = 
GPIO_DATA_OUT; if (val) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } /* * GPIO interface methods */ static int mv_gpio_pin_max(device_t dev, int *maxpin) { struct mv_gpio_softc *sc; if (maxpin == NULL) return (EINVAL); sc = device_get_softc(dev); *maxpin = sc->pin_num; return (0); } static int mv_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct mv_gpio_softc *sc = device_get_softc(dev); if (caps == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *caps = sc->gpio_setup[pin].gp_caps; MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct mv_gpio_softc *sc = device_get_softc(dev); if (flags == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *flags = sc->gpio_setup[pin].gp_flags; MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct mv_gpio_softc *sc = device_get_softc(dev); if (name == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); memcpy(name, sc->gpio_setup[pin].gp_name, GPIOMAXNAME); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { int ret; struct mv_gpio_softc *sc = device_get_softc(dev); if (pin >= sc->pin_num) return (EINVAL); /* Check for unwanted flags. */ if ((flags & sc->gpio_setup[pin].gp_caps) != flags) return (EINVAL); ret = mv_gpio_configure(dev, pin, flags, ~0); return (ret); } static int mv_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct mv_gpio_softc *sc = device_get_softc(dev); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); mv_gpio_value_set(dev, pin, value); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *value) { struct mv_gpio_softc *sc = device_get_softc(dev); if (value == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *value = mv_gpio_in(dev, pin); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_toggle(device_t dev, uint32_t pin) { struct mv_gpio_softc *sc = device_get_softc(dev); uint32_t value; if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); value = mv_gpio_in(dev, pin); value = (~value) & 1; mv_gpio_value_set(dev, pin, value); MV_GPIO_UNLOCK(); return (0); } static device_t mv_gpio_get_bus(device_t dev) { struct mv_gpio_softc *sc = device_get_softc(dev); return (sc->sc_busdev); } static int mv_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { struct mv_gpio_softc *sc = device_get_softc(bus); if (gpios[0] >= sc->pin_num) return (EINVAL); *pin = gpios[0]; *flags = gpios[1]; mv_gpio_configure(bus, *pin, *flags, ~0); return (0); } Index: head/sys/arm/mv/mpic.c =================================================================== --- head/sys/arm/mv/mpic.c (revision 368140) +++ head/sys/arm/mv/mpic.c (revision 368141) @@ -1,611 +1,609 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Benno Rice. * Copyright (C) 2007-2011 MARVELL INTERNATIONAL LTD. * Copyright (c) 2012 Semihalf. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_icu.c, rev 1 * from: FreeBSD: src/sys/arm/mv/ic.c,v 1.5 2011/02/08 01:49:30 */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#ifdef INTRNG #include "pic_if.h" -#endif #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif #define MPIC_INT_LOCAL 3 #define MPIC_INT_ERR 4 #define MPIC_INT_MSI 96 #define MPIC_IRQ_MASK 0x3ff #define MPIC_CTRL 0x0 #define MPIC_SOFT_INT 0x4 #define MPIC_SOFT_INT_DRBL1 (1 << 5) #define MPIC_ERR_CAUSE 0x20 #define MPIC_ISE 0x30 #define MPIC_ICE 0x34 #define MPIC_INT_CTL(irq) (0x100 + (irq)*4) #define MPIC_INT_IRQ_FIQ_MASK(cpuid) (0x101 << (cpuid)) #define MPIC_CTRL_NIRQS(ctrl) (((ctrl) >> 2) & 0x3ff) #define MPIC_IN_DRBL 0x08 #define MPIC_IN_DRBL_MASK 0x0c #define MPIC_PPI_CAUSE 0x10 #define MPIC_CTP 0x40 #define MPIC_IIACK 0x44 #define MPIC_ISM 0x48 #define MPIC_ICM 0x4c #define MPIC_ERR_MASK 0x50 #define MPIC_LOCAL_MASK 0x54 #define MPIC_CPU(n) (n) * 0x100 #define MPIC_PPI 32 struct mv_mpic_irqsrc { struct intr_irqsrc mmi_isrc; u_int mmi_irq; }; struct mv_mpic_softc { device_t sc_dev; struct resource * mpic_res[4]; bus_space_tag_t mpic_bst; bus_space_handle_t mpic_bsh; bus_space_tag_t cpu_bst; bus_space_handle_t cpu_bsh; bus_space_tag_t drbl_bst; bus_space_handle_t drbl_bsh; struct mtx mtx; struct mv_mpic_irqsrc * mpic_isrcs; int nirqs; void * intr_hand; }; static struct resource_spec mv_mpic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; static struct ofw_compat_data compat_data[] = { {"mrvl,mpic", true}, {"marvell,mpic", true}, {NULL, false} }; static struct mv_mpic_softc *mv_mpic_sc = NULL; void mpic_send_ipi(int cpus, u_int ipi); static int mv_mpic_probe(device_t); static int mv_mpic_attach(device_t); uint32_t mv_mpic_get_cause(void); uint32_t mv_mpic_get_cause_err(void); uint32_t mv_mpic_get_msi(void); static void mpic_unmask_irq(uintptr_t nb); static void mpic_mask_irq(uintptr_t nb); static void mpic_mask_irq_err(uintptr_t nb); static void mpic_unmask_irq_err(uintptr_t nb); static boolean_t mpic_irq_is_percpu(uintptr_t); static int mpic_intr(void *arg); static void mpic_unmask_msi(void); void mpic_init_secondary(device_t); void 
mpic_ipi_send(device_t, struct intr_irqsrc*, cpuset_t, u_int); int mpic_ipi_read(int); void mpic_ipi_clear(int); #define MPIC_WRITE(softc, reg, val) \ bus_space_write_4((softc)->mpic_bst, (softc)->mpic_bsh, (reg), (val)) #define MPIC_READ(softc, reg) \ bus_space_read_4((softc)->mpic_bst, (softc)->mpic_bsh, (reg)) #define MPIC_CPU_WRITE(softc, reg, val) \ bus_space_write_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg), (val)) #define MPIC_CPU_READ(softc, reg) \ bus_space_read_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg)) #define MPIC_DRBL_WRITE(softc, reg, val) \ bus_space_write_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg), (val)) #define MPIC_DRBL_READ(softc, reg) \ bus_space_read_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg)) static int mv_mpic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Marvell Integrated Interrupt Controller"); return (0); } static int mv_mpic_register_isrcs(struct mv_mpic_softc *sc) { int error; uint32_t irq; struct intr_irqsrc *isrc; const char *name; sc->mpic_isrcs = malloc(sc->nirqs * sizeof (*sc->mpic_isrcs), M_DEVBUF, M_WAITOK | M_ZERO); name = device_get_nameunit(sc->sc_dev); for (irq = 0; irq < sc->nirqs; irq++) { sc->mpic_isrcs[irq].mmi_irq = irq; isrc = &sc->mpic_isrcs[irq].mmi_isrc; if (irq < MPIC_PPI) { error = intr_isrc_register(isrc, sc->sc_dev, INTR_ISRCF_PPI, "%s", name); } else { error = intr_isrc_register(isrc, sc->sc_dev, 0, "%s", name); } if (error != 0) { /* XXX call intr_isrc_deregister() */ device_printf(sc->sc_dev, "%s failed", __func__); return (error); } } return (0); } static int mv_mpic_attach(device_t dev) { struct mv_mpic_softc *sc; int error; uint32_t val; int cpu; sc = (struct mv_mpic_softc *)device_get_softc(dev); if (mv_mpic_sc != NULL) return (ENXIO); mv_mpic_sc = sc; sc->sc_dev = dev; mtx_init(&sc->mtx, "MPIC lock", NULL, MTX_SPIN); error = bus_alloc_resources(dev, mv_mpic_spec, sc->mpic_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } if (sc->mpic_res[3] == NULL) device_printf(dev, "No interrupt to use.\n"); else bus_setup_intr(dev, sc->mpic_res[3], INTR_TYPE_CLK, mpic_intr, NULL, sc, &sc->intr_hand); sc->mpic_bst = rman_get_bustag(sc->mpic_res[0]); sc->mpic_bsh = rman_get_bushandle(sc->mpic_res[0]); sc->cpu_bst = rman_get_bustag(sc->mpic_res[1]); sc->cpu_bsh = rman_get_bushandle(sc->mpic_res[1]); if (sc->mpic_res[2] != NULL) { /* This is required only if MSIs are used. 
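This doorbell window is what backs mv_mpic_get_msi() and mv_msi_data() later in this file: mv_msi_data() points MSI writers at the MPIC_SOFT_INT register with a data word of MPIC_SOFT_INT_DRBL1 | vector, and the doorbell cause bits those writes latch are then read back, ffs()-scanned and cleared one vector at a time through this mapping.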
*/ sc->drbl_bst = rman_get_bustag(sc->mpic_res[2]); sc->drbl_bsh = rman_get_bushandle(sc->mpic_res[2]); } MPIC_WRITE(mv_mpic_sc, MPIC_CTRL, 1); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CTP, 0); val = MPIC_READ(mv_mpic_sc, MPIC_CTRL); sc->nirqs = MPIC_CTRL_NIRQS(val); if (mv_mpic_register_isrcs(sc) != 0) { device_printf(dev, "could not register PIC ISRCs\n"); bus_release_resources(dev, mv_mpic_spec, sc->mpic_res); return (ENXIO); } OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev); if (intr_pic_register(dev, OF_xref_from_device(dev)) == NULL) { device_printf(dev, "could not register PIC\n"); bus_release_resources(dev, mv_mpic_spec, sc->mpic_res); return (ENXIO); } mpic_unmask_msi(); /* Unmask CPU performance counters overflow irq */ for (cpu = 0; cpu < mp_ncpus; cpu++) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CPU(cpu) + MPIC_LOCAL_MASK, (1 << cpu) | MPIC_CPU_READ(mv_mpic_sc, MPIC_CPU(cpu) + MPIC_LOCAL_MASK)); return (0); } static int mpic_intr(void *arg) { struct mv_mpic_softc *sc; uint32_t cause, irqsrc; unsigned int irq; u_int cpuid; sc = arg; cpuid = PCPU_GET(cpuid); irq = 0; for (cause = MPIC_CPU_READ(sc, MPIC_PPI_CAUSE); cause > 0; cause >>= 1, irq++) { if (cause & 1) { irqsrc = MPIC_READ(sc, MPIC_INT_CTL(irq)); if ((irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)) == 0) continue; if (intr_isrc_dispatch(&sc->mpic_isrcs[irq].mmi_isrc, curthread->td_intr_frame) != 0) { mpic_mask_irq(irq); device_printf(sc->sc_dev, "Stray irq %u " "disabled\n", irq); } } } return (FILTER_HANDLED); } static void mpic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct mv_mpic_irqsrc *)isrc)->mmi_irq; mpic_mask_irq(irq); } static void mpic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct mv_mpic_irqsrc *)isrc)->mmi_irq; mpic_unmask_irq(irq); } static int mpic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct intr_map_data_fdt *daf; struct mv_mpic_softc *sc; if (data->type != INTR_MAP_DATA_FDT) return (ENOTSUP); sc = device_get_softc(dev); daf = (struct intr_map_data_fdt *)data; if (daf->ncells !=1 || daf->cells[0] >= sc->nirqs) return (EINVAL); *isrcp = &sc->mpic_isrcs[daf->cells[0]].mmi_isrc; return (0); } static void mpic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { mpic_disable_intr(dev, isrc); } static void mpic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { mpic_enable_intr(dev, isrc); } static void mpic_post_filter(device_t dev, struct intr_irqsrc *isrc) { } static device_method_t mv_mpic_methods[] = { DEVMETHOD(device_probe, mv_mpic_probe), DEVMETHOD(device_attach, mv_mpic_attach), DEVMETHOD(pic_disable_intr, mpic_disable_intr), DEVMETHOD(pic_enable_intr, mpic_enable_intr), DEVMETHOD(pic_map_intr, mpic_map_intr), DEVMETHOD(pic_post_filter, mpic_post_filter), DEVMETHOD(pic_post_ithread, mpic_post_ithread), DEVMETHOD(pic_pre_ithread, mpic_pre_ithread), DEVMETHOD(pic_init_secondary, mpic_init_secondary), DEVMETHOD(pic_ipi_send, mpic_ipi_send), { 0, 0 } }; static driver_t mv_mpic_driver = { "mpic", mv_mpic_methods, sizeof(struct mv_mpic_softc), }; static devclass_t mv_mpic_devclass; EARLY_DRIVER_MODULE(mpic, simplebus, mv_mpic_driver, mv_mpic_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); static void mpic_unmask_msi(void) { mpic_unmask_irq(MPIC_INT_MSI); } static void mpic_unmask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; MPIC_WRITE(mv_mpic_sc, MPIC_ISE, MPIC_INT_ERR); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, MPIC_INT_ERR); bit_off = nb - ERR_IRQ; mask = 
MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask |= (1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } static void mpic_mask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; bit_off = nb - ERR_IRQ; mask = MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask &= ~(1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } static boolean_t mpic_irq_is_percpu(uintptr_t nb) { if (nb < MPIC_PPI) return TRUE; return FALSE; } static void mpic_unmask_irq(uintptr_t nb) { #ifdef SMP int cpu; if (nb == MPIC_INT_LOCAL) { for (cpu = 0; cpu < mp_ncpus; cpu++) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CPU(cpu) + MPIC_ICM, nb); return; } #endif if (mpic_irq_is_percpu(nb)) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, nb); else if (nb < ERR_IRQ) MPIC_WRITE(mv_mpic_sc, MPIC_ISE, nb); else if (nb < MSI_IRQ) mpic_unmask_irq_err(nb); if (nb == 0) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL_MASK, 0xffffffff); } static void mpic_mask_irq(uintptr_t nb) { #ifdef SMP int cpu; if (nb == MPIC_INT_LOCAL) { for (cpu = 0; cpu < mp_ncpus; cpu++) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CPU(cpu) + MPIC_ISM, nb); return; } #endif if (mpic_irq_is_percpu(nb)) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ISM, nb); else if (nb < ERR_IRQ) MPIC_WRITE(mv_mpic_sc, MPIC_ICE, nb); else if (nb < MSI_IRQ) mpic_mask_irq_err(nb); } uint32_t mv_mpic_get_cause(void) { return (MPIC_CPU_READ(mv_mpic_sc, MPIC_IIACK)); } uint32_t mv_mpic_get_cause_err(void) { uint32_t err_cause; uint8_t bit_off; err_cause = MPIC_READ(mv_mpic_sc, MPIC_ERR_CAUSE); if (err_cause) bit_off = ffs(err_cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, err_cause); return (ERR_IRQ + bit_off); } uint32_t mv_mpic_get_msi(void) { uint32_t cause; uint8_t bit_off; KASSERT(mv_mpic_sc->drbl_bst != NULL, ("No doorbell in mv_mpic_get_msi")); cause = MPIC_DRBL_READ(mv_mpic_sc, 0); if (cause) bit_off = ffs(cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, cause); cause &= ~(1 << bit_off); MPIC_DRBL_WRITE(mv_mpic_sc, 0, cause); return (MSI_IRQ + bit_off); } int mv_msi_data(int irq, uint64_t *addr, uint32_t *data) { u_long phys, base, size; phandle_t node; int error; node = ofw_bus_get_node(mv_mpic_sc->sc_dev); /* Get physical address of register space */ error = fdt_get_range(OF_parent(node), 0, &phys, &size); if (error) { printf("%s: Cannot get register physical address, err:%d", __func__, error); return (error); } /* Get offset of MPIC register space */ error = fdt_regsize(node, &base, &size); if (error) { printf("%s: Cannot get MPIC register offset, err:%d", __func__, error); return (error); } *addr = phys + base + MPIC_SOFT_INT; *data = MPIC_SOFT_INT_DRBL1 | irq; return (0); } void mpic_init_secondary(device_t dev) { } void mpic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { uint32_t val, i; val = 0x00000000; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= (1 << (8 + i)); val |= ipi; MPIC_WRITE(mv_mpic_sc, MPIC_SOFT_INT, val); } int mpic_ipi_read(int i __unused) { uint32_t val; int ipi; val = MPIC_CPU_READ(mv_mpic_sc, MPIC_IN_DRBL); if (val) { ipi = ffs(val) - 1; MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL, ~(1 << ipi)); return (ipi); } return (0x3ff); } void mpic_ipi_clear(int ipi) { } Index: head/sys/arm/mv/mv_common.c =================================================================== --- head/sys/arm/mv/mv_common.c (revision 368140) +++ head/sys/arm/mv/mv_common.c (revision 368141) @@ -1,2942 +1,2909 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 2008-2011 MARVELL INTERNATIONAL 
LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_IDMA, "idma", "idma dma test memory"); #define IDMA_DEBUG #undef IDMA_DEBUG #define MAX_CPU_WIN 5 #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
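The do { ... } while (0) wrapper on the DEBUG variant is the standard way to make a two-printf macro behave as a single statement. A hypothetical call site shows what it protects against:

/*
 *	if (bootverbose)
 *		debugf("win %d at 0x%08x\n", i, base);
 *	else
 *		...
 *
 * With a bare { } block the expansion would end in "};", detaching the
 * else from its if; the do/while (0) form swallows the trailing
 * semicolon instead.
 */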
#endif #ifdef DEBUG #define MV_DUMP_WIN 1 #else #define MV_DUMP_WIN 0 #endif struct soc_node_spec; static enum soc_family soc_family; static int mv_win_cesa_attr_armv5(int eng_sel); static int mv_win_cesa_attr_armada38x(int eng_sel); static int mv_win_cesa_attr_armadaxp(int eng_sel); uint32_t read_cpu_ctrl_armv5(uint32_t reg); uint32_t read_cpu_ctrl_armv7(uint32_t reg); void write_cpu_ctrl_armv5(uint32_t reg, uint32_t val); void write_cpu_ctrl_armv7(uint32_t reg, uint32_t val); static int win_eth_can_remap(int i); static int decode_win_cesa_valid(void); static int decode_win_usb_valid(void); static int decode_win_usb3_valid(void); static int decode_win_eth_valid(void); static int decode_win_pcie_valid(void); static int decode_win_sata_valid(void); static int decode_win_sdhci_valid(void); static int decode_win_idma_valid(void); static int decode_win_xor_valid(void); static void decode_win_cpu_setup(void); static int decode_win_sdram_fixup(void); static void decode_win_cesa_setup(u_long); static void decode_win_a38x_cesa_setup(u_long); static void decode_win_usb_setup(u_long); static void decode_win_usb3_setup(u_long); static void decode_win_eth_setup(u_long); static void decode_win_neta_setup(u_long); static void decode_win_sata_setup(u_long); static void decode_win_ahci_setup(u_long); static void decode_win_sdhci_setup(u_long); static void decode_win_idma_setup(u_long); static void decode_win_xor_setup(u_long); static void decode_win_cesa_dump(u_long); static void decode_win_a38x_cesa_dump(u_long); static void decode_win_usb_dump(u_long); static void decode_win_usb3_dump(u_long); static void decode_win_eth_dump(u_long base); static void decode_win_neta_dump(u_long base); static void decode_win_idma_dump(u_long base); static void decode_win_xor_dump(u_long base); static void decode_win_ahci_dump(u_long base); static void decode_win_sdhci_dump(u_long); static void decode_win_pcie_dump(u_long); static uint32_t win_cpu_cr_read(int); static uint32_t win_cpu_armv5_cr_read(int); static uint32_t win_cpu_armv7_cr_read(int); static uint32_t win_cpu_br_read(int); static uint32_t win_cpu_armv5_br_read(int); static uint32_t win_cpu_armv7_br_read(int); static uint32_t win_cpu_remap_l_read(int); static uint32_t win_cpu_armv5_remap_l_read(int); static uint32_t win_cpu_armv7_remap_l_read(int); static uint32_t win_cpu_remap_h_read(int); static uint32_t win_cpu_armv5_remap_h_read(int); static uint32_t win_cpu_armv7_remap_h_read(int); static void win_cpu_cr_write(int, uint32_t); static void win_cpu_armv5_cr_write(int, uint32_t); static void win_cpu_armv7_cr_write(int, uint32_t); static void win_cpu_br_write(int, uint32_t); static void win_cpu_armv5_br_write(int, uint32_t); static void win_cpu_armv7_br_write(int, uint32_t); static void win_cpu_remap_l_write(int, uint32_t); static void win_cpu_armv5_remap_l_write(int, uint32_t); static void win_cpu_armv7_remap_l_write(int, uint32_t); static void win_cpu_remap_h_write(int, uint32_t); static void win_cpu_armv5_remap_h_write(int, uint32_t); static void win_cpu_armv7_remap_h_write(int, uint32_t); static uint32_t ddr_br_read(int); static uint32_t ddr_sz_read(int); static uint32_t ddr_armv5_br_read(int); static uint32_t ddr_armv5_sz_read(int); static uint32_t ddr_armv7_br_read(int); static uint32_t ddr_armv7_sz_read(int); static void ddr_br_write(int, uint32_t); static void ddr_sz_write(int, uint32_t); static void ddr_armv5_br_write(int, uint32_t); static void ddr_armv5_sz_write(int, uint32_t); static void ddr_armv7_br_write(int, uint32_t); static void 
ddr_armv7_sz_write(int, uint32_t); static int fdt_get_ranges(const char *, void *, int, int *, int *); int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt, int *trig, int *pol); static int win_cpu_from_dt(void); static int fdt_win_setup(void); static int fdt_win_process_child(phandle_t, struct soc_node_spec *, const char*); static void soc_identify(uint32_t, uint32_t); static uint32_t dev_mask = 0; static int cpu_wins_no = 0; static int eth_port = 0; static int usb_port = 0; static boolean_t platform_io_coherent = false; static struct decode_win cpu_win_tbl[MAX_CPU_WIN]; const struct decode_win *cpu_wins = cpu_win_tbl; typedef void (*decode_win_setup_t)(u_long); typedef void (*dump_win_t)(u_long); typedef int (*valid_t)(void); /* * Reading the device power status is only supported on * Kirkwood and Discovery SoCs. */ #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define SOC_MV_POWER_STAT_SUPPORTED 1 #else #define SOC_MV_POWER_STAT_SUPPORTED 0 #endif struct soc_node_spec { const char *compat; decode_win_setup_t decode_handler; dump_win_t dump_handler; valid_t valid_handler; }; static struct soc_node_spec soc_nodes[] = { { "mrvl,ge", &decode_win_eth_setup, &decode_win_eth_dump, &decode_win_eth_valid}, { "marvell,armada-370-neta", &decode_win_neta_setup, &decode_win_neta_dump, NULL }, { "mrvl,usb-ehci", &decode_win_usb_setup, &decode_win_usb_dump, &decode_win_usb_valid}, { "marvell,orion-ehci", &decode_win_usb_setup, &decode_win_usb_dump, &decode_win_usb_valid }, { "marvell,armada-380-xhci", &decode_win_usb3_setup, &decode_win_usb3_dump, &decode_win_usb3_valid }, { "marvell,armada-380-ahci", &decode_win_ahci_setup, &decode_win_ahci_dump, NULL }, { "marvell,armada-380-sdhci", &decode_win_sdhci_setup, &decode_win_sdhci_dump, &decode_win_sdhci_valid}, { "mrvl,sata", &decode_win_sata_setup, NULL, &decode_win_sata_valid}, { "mrvl,xor", &decode_win_xor_setup, &decode_win_xor_dump, &decode_win_xor_valid}, { "mrvl,idma", &decode_win_idma_setup, &decode_win_idma_dump, &decode_win_idma_valid}, { "mrvl,cesa", &decode_win_cesa_setup, &decode_win_cesa_dump, &decode_win_cesa_valid}, { "mrvl,pcie", &decode_win_pcie_setup, &decode_win_pcie_dump, &decode_win_pcie_valid}, { "marvell,armada-38x-crypto", &decode_win_a38x_cesa_setup, &decode_win_a38x_cesa_dump, &decode_win_cesa_valid}, { NULL, NULL, NULL, NULL }, }; #define SOC_NODE_PCIE_ENTRY_IDX 11 typedef uint32_t(*read_cpu_ctrl_t)(uint32_t); typedef void(*write_cpu_ctrl_t)(uint32_t, uint32_t); typedef uint32_t (*win_read_t)(int); typedef void (*win_write_t)(int, uint32_t); typedef int (*win_cesa_attr_t)(int); typedef uint32_t (*get_t)(void); struct decode_win_spec { read_cpu_ctrl_t read_cpu_ctrl; write_cpu_ctrl_t write_cpu_ctrl; win_read_t cr_read; win_read_t br_read; win_read_t remap_l_read; win_read_t remap_h_read; win_write_t cr_write; win_write_t br_write; win_write_t remap_l_write; win_write_t remap_h_write; uint32_t mv_win_cpu_max; win_cesa_attr_t win_cesa_attr; int win_cesa_target; win_read_t ddr_br_read; win_read_t ddr_sz_read; win_write_t ddr_br_write; win_write_t ddr_sz_write; -#if __ARM_ARCH >= 6 get_t get_tclk; get_t get_cpu_freq; -#endif }; struct decode_win_spec *soc_decode_win_spec; static struct decode_win_spec decode_win_specs[] = { { &read_cpu_ctrl_armv7, &write_cpu_ctrl_armv7, &win_cpu_armv7_cr_read, &win_cpu_armv7_br_read, &win_cpu_armv7_remap_l_read, &win_cpu_armv7_remap_h_read, &win_cpu_armv7_cr_write, &win_cpu_armv7_br_write, &win_cpu_armv7_remap_l_write, &win_cpu_armv7_remap_h_write, MV_WIN_CPU_MAX_ARMV7,
&mv_win_cesa_attr_armada38x, MV_WIN_CESA_TARGET_ARMADA38X, &ddr_armv7_br_read, &ddr_armv7_sz_read, &ddr_armv7_br_write, &ddr_armv7_sz_write, -#if __ARM_ARCH >= 6 &get_tclk_armada38x, &get_cpu_freq_armada38x, -#endif }, { &read_cpu_ctrl_armv7, &write_cpu_ctrl_armv7, &win_cpu_armv7_cr_read, &win_cpu_armv7_br_read, &win_cpu_armv7_remap_l_read, &win_cpu_armv7_remap_h_read, &win_cpu_armv7_cr_write, &win_cpu_armv7_br_write, &win_cpu_armv7_remap_l_write, &win_cpu_armv7_remap_h_write, MV_WIN_CPU_MAX_ARMV7, &mv_win_cesa_attr_armadaxp, MV_WIN_CESA_TARGET_ARMADAXP, &ddr_armv7_br_read, &ddr_armv7_sz_read, &ddr_armv7_br_write, &ddr_armv7_sz_write, -#if __ARM_ARCH >= 6 &get_tclk_armadaxp, &get_cpu_freq_armadaxp, -#endif }, { &read_cpu_ctrl_armv5, &write_cpu_ctrl_armv5, &win_cpu_armv5_cr_read, &win_cpu_armv5_br_read, &win_cpu_armv5_remap_l_read, &win_cpu_armv5_remap_h_read, &win_cpu_armv5_cr_write, &win_cpu_armv5_br_write, &win_cpu_armv5_remap_l_write, &win_cpu_armv5_remap_h_write, MV_WIN_CPU_MAX, &mv_win_cesa_attr_armv5, MV_WIN_CESA_TARGET, &ddr_armv5_br_read, &ddr_armv5_sz_read, &ddr_armv5_br_write, &ddr_armv5_sz_write, -#if __ARM_ARCH >= 6 NULL, NULL, -#endif }, }; struct fdt_pm_mask_entry { char *compat; uint32_t mask; }; static struct fdt_pm_mask_entry fdt_pm_mask_table[] = { { "mrvl,ge", CPU_PM_CTRL_GE(0) }, { "mrvl,ge", CPU_PM_CTRL_GE(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(0) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(2) }, { "mrvl,xor", CPU_PM_CTRL_XOR }, { "mrvl,sata", CPU_PM_CTRL_SATA }, { NULL, 0 } }; static __inline int pm_is_disabled(uint32_t mask) { #if SOC_MV_POWER_STAT_SUPPORTED return (soc_power_ctrl_get(mask) == mask ? 0 : 1); #else return (0); #endif } /* * Disable device using power management register. * 1 - Device Power On * 0 - Device Power Off * Mask can be set in loader. * EXAMPLE: * loader> set hw.pm-disable-mask=0x2 * * Common mask: * |-------------------------------| * | Device | Kirkwood | Discovery | * |-------------------------------| * | USB0 | 0x00008 | 0x020000 | * |-------------------------------| * | USB1 | - | 0x040000 | * |-------------------------------| * | USB2 | - | 0x080000 | * |-------------------------------| * | GE0 | 0x00001 | 0x000002 | * |-------------------------------| * | GE1 | - | 0x000004 | * |-------------------------------| * | IDMA | - | 0x100000 | * |-------------------------------| * | XOR | 0x10000 | 0x200000 | * |-------------------------------| * | CESA | 0x20000 | 0x400000 | * |-------------------------------| * | SATA | 0x04000 | 0x004000 | * --------------------------------| * This feature can be used only on Kirkwood and Discovery * machines. 
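As a concrete illustration of the table above, here is a standalone sketch (not driver code) that composes a hw.pm-disable-mask value from the Kirkwood column; the macro names are invented for the example and only the numeric values come from the table:

#include <stdio.h>

#define KW_PM_GE0	0x00001	/* GE0,  Kirkwood column */
#define KW_PM_USB0	0x00008	/* USB0, Kirkwood column */
#define KW_PM_SATA	0x04000	/* SATA, Kirkwood column */
#define KW_PM_XOR	0x10000	/* XOR,  Kirkwood column */
#define KW_PM_CESA	0x20000	/* CESA, Kirkwood column */

int
main(void)
{
	/* Disable USB0 and CESA: loader> set hw.pm-disable-mask=0x20008 */
	printf("set hw.pm-disable-mask=%#x\n", KW_PM_USB0 | KW_PM_CESA);
	return (0);
}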
*/ static int mv_win_cesa_attr_armv5(int eng_sel) { return MV_WIN_CESA_ATTR(eng_sel); } static int mv_win_cesa_attr_armada38x(int eng_sel) { return MV_WIN_CESA_ATTR_ARMADA38X(eng_sel); } static int mv_win_cesa_attr_armadaxp(int eng_sel) { return MV_WIN_CESA_ATTR_ARMADAXP(eng_sel); } enum soc_family mv_check_soc_family() { uint32_t dev, rev; soc_id(&dev, &rev); switch (dev) { case MV_DEV_MV78230: case MV_DEV_MV78260: case MV_DEV_MV78460: soc_decode_win_spec = &decode_win_specs[MV_SOC_ARMADA_XP]; soc_family = MV_SOC_ARMADA_XP; break; case MV_DEV_88F6828: case MV_DEV_88F6820: case MV_DEV_88F6810: soc_decode_win_spec = &decode_win_specs[MV_SOC_ARMADA_38X]; soc_family = MV_SOC_ARMADA_38X; break; case MV_DEV_88F5181: case MV_DEV_88F5182: case MV_DEV_88F5281: case MV_DEV_88F6281: case MV_DEV_88RC8180: case MV_DEV_88RC9480: case MV_DEV_88RC9580: case MV_DEV_88F6781: case MV_DEV_88F6282: case MV_DEV_MV78100_Z0: case MV_DEV_MV78100: case MV_DEV_MV78160: soc_decode_win_spec = &decode_win_specs[MV_SOC_ARMV5]; soc_family = MV_SOC_ARMV5; break; default: soc_family = MV_SOC_UNSUPPORTED; return (MV_SOC_UNSUPPORTED); } soc_identify(dev, rev); return (soc_family); } static __inline void pm_disable_device(int mask) { #ifdef DIAGNOSTIC uint32_t reg; reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL); printf("Power Management Register: 0x%x\n", reg); reg &= ~mask; soc_power_ctrl_set(reg); printf("Device 0x%x is disabled\n", mask); reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL); printf("Power Management Register: 0x%x\n", reg); #endif } int mv_fdt_is_type(phandle_t node, const char *typestr) { #define FDT_TYPE_LEN 64 char type[FDT_TYPE_LEN]; if (OF_getproplen(node, "device_type") <= 0) return (0); if (OF_getprop(node, "device_type", type, FDT_TYPE_LEN) < 0) return (0); if (strncasecmp(type, typestr, FDT_TYPE_LEN) == 0) /* This fits.
*/ return (1); return (0); #undef FDT_TYPE_LEN } int mv_fdt_pm(phandle_t node) { uint32_t cpu_pm_ctrl; int i, ena, compat; ena = 1; cpu_pm_ctrl = read_cpu_ctrl(CPU_PM_CTRL); for (i = 0; fdt_pm_mask_table[i].compat != NULL; i++) { if (dev_mask & (1 << i)) continue; compat = ofw_bus_node_is_compatible(node, fdt_pm_mask_table[i].compat); #if defined(SOC_MV_KIRKWOOD) if (compat && (cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) { dev_mask |= (1 << i); ena = 0; break; } else if (compat) { dev_mask |= (1 << i); break; } #else if (compat && (~cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) { dev_mask |= (1 << i); ena = 0; break; } else if (compat) { dev_mask |= (1 << i); break; } #endif } return (ena); } uint32_t read_cpu_ctrl(uint32_t reg) { if (soc_decode_win_spec->read_cpu_ctrl != NULL) return (soc_decode_win_spec->read_cpu_ctrl(reg)); return (-1); } uint32_t read_cpu_ctrl_armv5(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg)); } uint32_t read_cpu_ctrl_armv7(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE_ARMV7, reg)); } void write_cpu_ctrl(uint32_t reg, uint32_t val) { if (soc_decode_win_spec->write_cpu_ctrl != NULL) soc_decode_win_spec->write_cpu_ctrl(reg, val); } void write_cpu_ctrl_armv5(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg, val); } void write_cpu_ctrl_armv7(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE_ARMV7, reg, val); } uint32_t read_cpu_mp_clocks(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg)); } void write_cpu_mp_clocks(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg, val); } uint32_t read_cpu_misc(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, reg)); } void write_cpu_misc(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_MISC_BASE, reg, val); } uint32_t cpu_extra_feat(void) { uint32_t dev, rev; uint32_t ef = 0; soc_id(&dev, &rev); switch (dev) { case MV_DEV_88F6281: case MV_DEV_88F6282: case MV_DEV_88RC8180: case MV_DEV_MV78100_Z0: case MV_DEV_MV78100: __asm __volatile("mrc p15, 1, %0, c15, c1, 0" : "=r" (ef)); break; case MV_DEV_88F5182: case MV_DEV_88F5281: __asm __volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (ef)); break; default: if (bootverbose) printf("This ARM Core does not support any extra features\n"); } return (ef); } /* * Get the power status of device. This feature is only supported on * Kirkwood and Discovery SoCs. */ uint32_t soc_power_ctrl_get(uint32_t mask) { #if SOC_MV_POWER_STAT_SUPPORTED if (mask != CPU_PM_CTRL_NONE) mask &= read_cpu_ctrl(CPU_PM_CTRL); return (mask); #else return (mask); #endif } /* * Set the power status of device. This feature is only supported on * Kirkwood and Discovery SoCs. */ void soc_power_ctrl_set(uint32_t mask) { #if !defined(SOC_MV_ORION) if (mask != CPU_PM_CTRL_NONE) write_cpu_ctrl(CPU_PM_CTRL, mask); #endif } void soc_id(uint32_t *dev, uint32_t *rev) { uint64_t mv_pcie_base = MV_PCIE_BASE; phandle_t node; /* * Notice: system identifiers are available in the registers range of * PCIE controller, so using this function is only allowed (and * possible) after the internal registers range has been mapped in via * devmap_bootstrap(). 
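Once the PCIe register range is mapped, the extraction done by soc_id() is straightforward: the device ID is the upper halfword of the register at offset 0 and the revision is the low byte of the register at offset 8. A minimal standalone sketch of that decoding, with invented raw register values:

#include <assert.h>
#include <stdint.h>

static void
decode_soc_id(uint32_t reg0, uint32_t reg8, uint32_t *dev, uint32_t *rev)
{
	*dev = reg0 >> 16;	/* device ID: upper halfword at offset 0 */
	*rev = reg8 & 0xff;	/* revision: low byte at offset 8 */
}

int
main(void)
{
	uint32_t dev, rev;

	decode_soc_id(0x62810011, 0x00000003, &dev, &rev);	/* hypothetical values */
	assert(dev == 0x6281 && rev == 3);
	return (0);
}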
*/ *dev = 0; *rev = 0; if ((node = OF_finddevice("/")) == -1) return; if (ofw_bus_node_is_compatible(node, "marvell,armada380")) mv_pcie_base = MV_PCIE_BASE_ARMADA38X; *dev = bus_space_read_4(fdtbus_bs_tag, mv_pcie_base, 0) >> 16; *rev = bus_space_read_4(fdtbus_bs_tag, mv_pcie_base, 8) & 0xff; } static void soc_identify(uint32_t d, uint32_t r) { uint32_t size, mode, freq; const char *dev; const char *rev; printf("SOC: "); if (bootverbose) printf("(0x%4x:0x%02x) ", d, r); rev = ""; switch (d) { case MV_DEV_88F5181: dev = "Marvell 88F5181"; if (r == 3) rev = "B1"; break; case MV_DEV_88F5182: dev = "Marvell 88F5182"; if (r == 2) rev = "A2"; break; case MV_DEV_88F5281: dev = "Marvell 88F5281"; if (r == 4) rev = "D0"; else if (r == 5) rev = "D1"; else if (r == 6) rev = "D2"; break; case MV_DEV_88F6281: dev = "Marvell 88F6281"; if (r == 0) rev = "Z0"; else if (r == 2) rev = "A0"; else if (r == 3) rev = "A1"; break; case MV_DEV_88RC8180: dev = "Marvell 88RC8180"; break; case MV_DEV_88RC9480: dev = "Marvell 88RC9480"; break; case MV_DEV_88RC9580: dev = "Marvell 88RC9580"; break; case MV_DEV_88F6781: dev = "Marvell 88F6781"; if (r == 2) rev = "Y0"; break; case MV_DEV_88F6282: dev = "Marvell 88F6282"; if (r == 0) rev = "A0"; else if (r == 1) rev = "A1"; break; case MV_DEV_88F6828: dev = "Marvell 88F6828"; break; case MV_DEV_88F6820: dev = "Marvell 88F6820"; break; case MV_DEV_88F6810: dev = "Marvell 88F6810"; break; case MV_DEV_MV78100_Z0: dev = "Marvell MV78100 Z0"; break; case MV_DEV_MV78100: dev = "Marvell MV78100"; break; case MV_DEV_MV78160: dev = "Marvell MV78160"; break; case MV_DEV_MV78260: dev = "Marvell MV78260"; break; case MV_DEV_MV78460: dev = "Marvell MV78460"; break; default: dev = "UNKNOWN"; break; } printf("%s", dev); if (*rev != '\0') printf(" rev %s", rev); printf(", TClock %dMHz", get_tclk() / 1000 / 1000); freq = get_cpu_freq(); if (freq != 0) printf(", Frequency %dMHz", freq / 1000 / 1000); printf("\n"); mode = read_cpu_ctrl(CPU_CONFIG); printf(" Instruction cache prefetch %s, data cache prefetch %s\n", (mode & CPU_CONFIG_IC_PREF) ? "enabled" : "disabled", (mode & CPU_CONFIG_DC_PREF) ? "enabled" : "disabled"); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: mode = read_cpu_ctrl(CPU_L2_CONFIG) & CPU_L2_CONFIG_MODE; printf(" 256KB 4-way set-associative %s unified L2 cache\n", mode ? "write-through" : "write-back"); break; case MV_DEV_MV78100: mode = read_cpu_ctrl(CPU_CONTROL); size = mode & CPU_CONTROL_L2_SIZE; mode = mode & CPU_CONTROL_L2_MODE; printf(" %s set-associative %s unified L2 cache\n", size ? "256KB 4-way" : "512KB 8-way", mode ? "write-through" : "write-back"); break; default: break; } } #ifdef KDB static void mv_enter_debugger(void *dummy) { if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); } SYSINIT(mv_enter_debugger, SI_SUB_CPU, SI_ORDER_ANY, mv_enter_debugger, NULL); #endif int soc_decode_win(void) { uint32_t dev, rev; int mask, err; mask = 0; TUNABLE_INT_FETCH("hw.pm-disable-mask", &mask); if (mask != 0) pm_disable_device(mask); /* Retrieve data about physical addresses from device tree. 
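soc_decode_win() and the window accessors that follow all indirect through soc_decode_win_spec, the per-family table of function pointers selected once in mv_check_soc_family(). A minimal standalone sketch of that dispatch pattern, with illustrative names and values rather than the driver's:

#include <stddef.h>
#include <stdint.h>

struct win_ops {
	uint32_t (*cr_read)(int);
	void     (*cr_write)(int, uint32_t);
};

static uint32_t fake_cr_read(int i) { return (0x80000000u | (uint32_t)i); }
static void     fake_cr_write(int i, uint32_t v) { (void)i; (void)v; }

static const struct win_ops family_ops = { fake_cr_read, fake_cr_write };
static const struct win_ops *ops = &family_ops;	/* chosen once, at probe time */

static uint32_t
cr_read(int i)
{
	/* Wrappers tolerate a missing handler, as the driver's wrappers do. */
	if (ops != NULL && ops->cr_read != NULL)
		return (ops->cr_read(i));
	return ((uint32_t)-1);
}

int
main(void)
{
	return (cr_read(3) == 0x80000003u ? 0 : 1);
}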
*/ if ((err = win_cpu_from_dt()) != 0) return (err); /* Retrieve our ID: some windows facilities vary between SoC models */ soc_id(&dev, &rev); if (soc_family == MV_SOC_ARMADA_XP) if ((err = decode_win_sdram_fixup()) != 0) return(err); decode_win_cpu_setup(); if (MV_DUMP_WIN) soc_dump_decode_win(); eth_port = 0; usb_port = 0; if ((err = fdt_win_setup()) != 0) return (err); return (0); } /************************************************************************** * Decode windows registers accessors **************************************************************************/ WIN_REG_IDX_RD(win_cpu_armv5, cr, MV_WIN_CPU_CTRL_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv5, br, MV_WIN_CPU_BASE_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv5, remap_l, MV_WIN_CPU_REMAP_LO_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv5, remap_h, MV_WIN_CPU_REMAP_HI_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv5, cr, MV_WIN_CPU_CTRL_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv5, br, MV_WIN_CPU_BASE_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv5, remap_l, MV_WIN_CPU_REMAP_LO_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv5, remap_h, MV_WIN_CPU_REMAP_HI_ARMV5, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv7, cr, MV_WIN_CPU_CTRL_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv7, br, MV_WIN_CPU_BASE_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv7, remap_l, MV_WIN_CPU_REMAP_LO_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu_armv7, remap_h, MV_WIN_CPU_REMAP_HI_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv7, cr, MV_WIN_CPU_CTRL_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv7, br, MV_WIN_CPU_BASE_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv7, remap_l, MV_WIN_CPU_REMAP_LO_ARMV7, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu_armv7, remap_h, MV_WIN_CPU_REMAP_HI_ARMV7, MV_MBUS_BRIDGE_BASE) static uint32_t win_cpu_cr_read(int i) { if (soc_decode_win_spec->cr_read != NULL) return (soc_decode_win_spec->cr_read(i)); return (-1); } static uint32_t win_cpu_br_read(int i) { if (soc_decode_win_spec->br_read != NULL) return (soc_decode_win_spec->br_read(i)); return (-1); } static uint32_t win_cpu_remap_l_read(int i) { if (soc_decode_win_spec->remap_l_read != NULL) return (soc_decode_win_spec->remap_l_read(i)); return (-1); } static uint32_t win_cpu_remap_h_read(int i) { if (soc_decode_win_spec->remap_h_read != NULL) return soc_decode_win_spec->remap_h_read(i); return (-1); } static void win_cpu_cr_write(int i, uint32_t val) { if (soc_decode_win_spec->cr_write != NULL) soc_decode_win_spec->cr_write(i, val); } static void win_cpu_br_write(int i, uint32_t val) { if (soc_decode_win_spec->br_write != NULL) soc_decode_win_spec->br_write(i, val); } static void win_cpu_remap_l_write(int i, uint32_t val) { if (soc_decode_win_spec->remap_l_write != NULL) soc_decode_win_spec->remap_l_write(i, val); } static void win_cpu_remap_h_write(int i, uint32_t val) { if (soc_decode_win_spec->remap_h_write != NULL) soc_decode_win_spec->remap_h_write(i, val); } WIN_REG_BASE_IDX_RD(win_cesa, cr, MV_WIN_CESA_CTRL) WIN_REG_BASE_IDX_RD(win_cesa, br, MV_WIN_CESA_BASE) WIN_REG_BASE_IDX_WR(win_cesa, cr, MV_WIN_CESA_CTRL) WIN_REG_BASE_IDX_WR(win_cesa, br, MV_WIN_CESA_BASE) WIN_REG_BASE_IDX_RD(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_RD(win_usb, br, MV_WIN_USB_BASE) WIN_REG_BASE_IDX_WR(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_WR(win_usb, br, MV_WIN_USB_BASE) WIN_REG_BASE_IDX_RD(win_usb3, cr, MV_WIN_USB3_CTRL) WIN_REG_BASE_IDX_RD(win_usb3, br, 
MV_WIN_USB3_BASE) WIN_REG_BASE_IDX_WR(win_usb3, cr, MV_WIN_USB3_CTRL) WIN_REG_BASE_IDX_WR(win_usb3, br, MV_WIN_USB3_BASE) WIN_REG_BASE_IDX_RD(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_RD(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_RD(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_IDX_WR(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_WR(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_WR(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_RD(win_eth, bare, 0x290) WIN_REG_BASE_RD(win_eth, epap, 0x294) WIN_REG_BASE_WR(win_eth, bare, 0x290) WIN_REG_BASE_WR(win_eth, epap, 0x294) WIN_REG_BASE_IDX_RD(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_RD(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_RD(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_WR(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_WR(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_WR(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_RD(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_RD(pcie_bar, brh, MV_PCIE_BAR_BASE_H); WIN_REG_BASE_IDX_RD(pcie_bar, cr, MV_PCIE_BAR_CTRL); WIN_REG_BASE_IDX_WR(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_WR(pcie_bar, brh, MV_PCIE_BAR_BASE_H); WIN_REG_BASE_IDX_WR(pcie_bar, cr, MV_PCIE_BAR_CTRL); WIN_REG_BASE_IDX_RD(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_RD(win_sata, br, MV_WIN_SATA_BASE); WIN_REG_BASE_IDX_WR(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_WR(win_sata, br, MV_WIN_SATA_BASE); WIN_REG_BASE_IDX_RD(win_sata_armada38x, sz, MV_WIN_SATA_SIZE_ARMADA38X); WIN_REG_BASE_IDX_WR(win_sata_armada38x, sz, MV_WIN_SATA_SIZE_ARMADA38X); WIN_REG_BASE_IDX_RD(win_sata_armada38x, cr, MV_WIN_SATA_CTRL_ARMADA38X); WIN_REG_BASE_IDX_WR(win_sata_armada38x, cr, MV_WIN_SATA_CTRL_ARMADA38X); WIN_REG_BASE_IDX_WR(win_sata_armada38x, br, MV_WIN_SATA_BASE_ARMADA38X); WIN_REG_BASE_IDX_RD(win_sdhci, cr, MV_WIN_SDHCI_CTRL); WIN_REG_BASE_IDX_RD(win_sdhci, br, MV_WIN_SDHCI_BASE); WIN_REG_BASE_IDX_WR(win_sdhci, cr, MV_WIN_SDHCI_CTRL); WIN_REG_BASE_IDX_WR(win_sdhci, br, MV_WIN_SDHCI_BASE); #ifndef SOC_MV_DOVE WIN_REG_IDX_RD(ddr_armv5, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_RD(ddr_armv5, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr_armv5, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr_armv5, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) WIN_REG_IDX_RD(ddr_armv7, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE_ARMV7) WIN_REG_IDX_RD(ddr_armv7, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE_ARMV7) WIN_REG_IDX_WR(ddr_armv7, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE_ARMV7) WIN_REG_IDX_WR(ddr_armv7, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE_ARMV7) static inline uint32_t ddr_br_read(int i) { if (soc_decode_win_spec->ddr_br_read != NULL) return (soc_decode_win_spec->ddr_br_read(i)); return (-1); } static inline uint32_t ddr_sz_read(int i) { if (soc_decode_win_spec->ddr_sz_read != NULL) return (soc_decode_win_spec->ddr_sz_read(i)); return (-1); } static inline void ddr_br_write(int i, uint32_t val) { if (soc_decode_win_spec->ddr_br_write != NULL) soc_decode_win_spec->ddr_br_write(i, val); } static inline void ddr_sz_write(int i, uint32_t val) { if (soc_decode_win_spec->ddr_sz_write != NULL) soc_decode_win_spec->ddr_sz_write(i, val); } #else /* * On 88F6781 (Dove) SoC DDR Controller is accessed through * single MBUS <-> AXI bridge. In this case we provide emulated * ddr_br_read() and ddr_sz_read() functions to keep compatibility * with common decoding windows setup code. 
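A worked example of that emulation, assuming a hypothetical Memory Address Map register value: the CS base is simply the register's top byte, and bit 0 carries the enable status that the emulated ddr_sz_read() passes through.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t mmap = 0x100A0001;	/* hypothetical MMAP register value */

	assert((mmap & 0xFF000000) == 0x10000000);	/* CS base, as in ddr_br_read() */
	assert((mmap & 0x01) == 1);			/* CS enabled */
	return (0);
}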
*/ static inline uint32_t ddr_br_read(int i) { uint32_t mmap; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Return CS i base address */ return (mmap & 0xFF000000); } static inline uint32_t ddr_sz_read(int i) { uint32_t mmap, size; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Extract size of CS space in 64kB units */ size = (1 << ((mmap >> 16) & 0x0F)); /* Return CS size and enable/disable status */ return (((size - 1) << 16) | (mmap & 0x01)); } #endif /************************************************************************** * Decode windows helper routines **************************************************************************/ void soc_dump_decode_win(void) { int i; for (i = 0; i < soc_decode_win_spec->mv_win_cpu_max; i++) { printf("CPU window#%d: c 0x%08x, b 0x%08x", i, win_cpu_cr_read(i), win_cpu_br_read(i)); if (win_cpu_can_remap(i)) printf(", rl 0x%08x, rh 0x%08x", win_cpu_remap_l_read(i), win_cpu_remap_h_read(i)); printf("\n"); } printf("Internal regs base: 0x%08x\n", bus_space_read_4(fdtbus_bs_tag, MV_INTREGS_BASE, 0)); for (i = 0; i < MV_WIN_DDR_MAX; i++) printf("DDR CS#%d: b 0x%08x, s 0x%08x\n", i, ddr_br_read(i), ddr_sz_read(i)); } /************************************************************************** * CPU windows routines **************************************************************************/ int win_cpu_can_remap(int i) { uint32_t dev, rev; soc_id(&dev, &rev); /* Depending on the SoC certain windows have remap capability */ if ((dev == MV_DEV_88F5182 && i < 2) || (dev == MV_DEV_88F5281 && i < 4) || (dev == MV_DEV_88F6281 && i < 4) || (dev == MV_DEV_88F6282 && i < 4) || (dev == MV_DEV_88F6828 && i < 20) || (dev == MV_DEV_88F6820 && i < 20) || (dev == MV_DEV_88F6810 && i < 20) || (dev == MV_DEV_88RC8180 && i < 2) || (dev == MV_DEV_88F6781 && i < 4) || (dev == MV_DEV_MV78100_Z0 && i < 8) || ((dev & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY && i < 8)) return (1); return (0); } /* XXX This should check for overlapping remap fields too.. */ int decode_win_overlap(int win, int win_no, const struct decode_win *wintab) { const struct decode_win *tab; int i; tab = wintab; for (i = 0; i < win_no; i++, tab++) { if (i == win) /* Skip self */ continue; if ((tab->base + tab->size - 1) < (wintab + win)->base) continue; else if (((wintab + win)->base + (wintab + win)->size - 1) < tab->base) continue; else return (i); } return (-1); } int decode_win_cpu_set(int target, int attr, vm_paddr_t base, uint32_t size, vm_paddr_t remap) { uint32_t br, cr; int win, i; if (remap == ~0) { win = soc_decode_win_spec->mv_win_cpu_max - 1; i = -1; } else { win = 0; i = 1; } while ((win >= 0) && (win < soc_decode_win_spec->mv_win_cpu_max)) { cr = win_cpu_cr_read(win); if ((cr & MV_WIN_CPU_ENABLE_BIT) == 0) break; if ((cr & ((0xff << MV_WIN_CPU_ATTR_SHIFT) | (0x1f << MV_WIN_CPU_TARGET_SHIFT))) == ((attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT))) break; win += i; } if ((win < 0) || (win >= soc_decode_win_spec->mv_win_cpu_max) || ((remap != ~0) && (win_cpu_can_remap(win) == 0))) return (-1); br = base & 0xffff0000; win_cpu_br_write(win, br); if (win_cpu_can_remap(win)) { if (remap != ~0) { win_cpu_remap_l_write(win, remap & 0xffff0000); win_cpu_remap_h_write(win, 0); } else { /* * Remap function is not used for a given window * (capable of remapping) - set remap field with the * same value as base. 
*/ win_cpu_remap_l_write(win, base & 0xffff0000); win_cpu_remap_h_write(win, 0); } } cr = ((size - 1) & 0xffff0000) | (attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT) | MV_WIN_CPU_ENABLE_BIT; win_cpu_cr_write(win, cr); return (0); } static void decode_win_cpu_setup(void) { int i; /* Disable all CPU windows */ for (i = 0; i < soc_decode_win_spec->mv_win_cpu_max; i++) { win_cpu_cr_write(i, 0); win_cpu_br_write(i, 0); if (win_cpu_can_remap(i)) { win_cpu_remap_l_write(i, 0); win_cpu_remap_h_write(i, 0); } } for (i = 0; i < cpu_wins_no; i++) if (cpu_wins[i].target > 0) decode_win_cpu_set(cpu_wins[i].target, cpu_wins[i].attr, cpu_wins[i].base, cpu_wins[i].size, cpu_wins[i].remap); } static int decode_win_sdram_fixup(void) { struct mem_region mr[FDT_MEM_REGIONS]; uint8_t window_valid[MV_WIN_DDR_MAX]; int mr_cnt, err, i, j; uint32_t valid_win_num = 0; /* Grab physical memory regions information from device tree. */ err = fdt_get_mem_regions(mr, &mr_cnt, NULL); if (err != 0) return (err); for (i = 0; i < MV_WIN_DDR_MAX; i++) window_valid[i] = 0; /* Try to match entries from device tree with settings from u-boot */ for (i = 0; i < mr_cnt; i++) { for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (ddr_base(j) == mr[i].mr_start) && (ddr_size(j) == mr[i].mr_size)) { window_valid[j] = 1; valid_win_num++; } } } if (mr_cnt != valid_win_num) return (EINVAL); /* Destroy windows without corresponding device tree entry */ for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (window_valid[j] != 1)) { printf("Disabling SDRAM decoding window: %d\n", j); ddr_disable(j); } } return (0); } /* * Check if we're able to cover all active DDR banks. */ static int decode_win_can_cover_ddr(int max) { int i, c; c = 0; for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (c > max) { printf("Unable to cover all active DDR banks: " "%d, available windows: %d\n", c, max); return (0); } return (1); } /************************************************************************** * DDR windows routines **************************************************************************/ int ddr_is_active(int i) { if (ddr_sz_read(i) & 0x1) return (1); return (0); } void ddr_disable(int i) { ddr_sz_write(i, 0); ddr_br_write(i, 0); } uint32_t ddr_base(int i) { return (ddr_br_read(i) & 0xff000000); } uint32_t ddr_size(int i) { return ((ddr_sz_read(i) | 0x00ffffff) + 1); } uint32_t ddr_attr(int i) { uint32_t dev, rev, attr; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) return ((ddr_sz_read(i) & 0xf0) >> 4); if (dev == MV_DEV_88F6781) return (0); attr = (i == 0 ? 0xe : (i == 1 ? 0xd : (i == 2 ? 0xb : (i == 3 ? 0x7 : 0xff)))); if (platform_io_coherent) attr |= 0x10; return (attr); } uint32_t ddr_target(int i) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) { i = (ddr_sz_read(i) & 0xf0) >> 4; return (i == 0xe ? 0xc : (i == 0xd ? 0xd : (i == 0xb ? 0xe : (i == 0x7 ? 0xf : 0xc)))); } /* * On SOCs other than 88RC8180 Mbus unit ID for * DDR SDRAM controller is always 0x0. 
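The nested ternary in ddr_attr() above maps chip selects 0..3 to the attributes 0xe, 0xd, 0xb and 0x7; an equivalent switch form, shown standalone with the I/O-coherency bit applied the same way:

#include <stdbool.h>
#include <stdint.h>

static uint32_t
ddr_attr_sketch(int cs, bool io_coherent)
{
	uint32_t attr;

	switch (cs) {
	case 0:  attr = 0xe; break;
	case 1:  attr = 0xd; break;
	case 2:  attr = 0xb; break;
	case 3:  attr = 0x7; break;
	default: attr = 0xff; break;
	}
	if (io_coherent)
		attr |= 0x10;	/* request I/O-coherent transactions */
	return (attr);
}

int
main(void)
{
	return (ddr_attr_sketch(0, false) == 0xe ? 0 : 1);
}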
*/ return (0); } /************************************************************************** * CESA windows routines **************************************************************************/ static int decode_win_cesa_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_CESA_MAX)); } static void decode_win_cesa_dump(u_long base) { int i; for (i = 0; i < MV_WIN_CESA_MAX; i++) printf("CESA window#%d: c 0x%08x, b 0x%08x\n", i, win_cesa_cr_read(base, i), win_cesa_br_read(base, i)); } /* * Set CESA decode windows. */ static void decode_win_cesa_setup(u_long base) { uint32_t br, cr; uint64_t size; int i, j; for (i = 0; i < MV_WIN_CESA_MAX; i++) { win_cesa_cr_write(base, i, 0); win_cesa_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); size = ddr_size(i); /* * Armada 38x SoCs equipped with 4GB of DRAM * freeze during CESA operation if the MBUS * window opened at a given DRAM CS reaches the * end of the address space. Apply a workaround * by setting the window size to the closest possible * value, i.e. halving it. */ if ((soc_family == MV_SOC_ARMADA_38X) && (size + ddr_base(i) == 0x100000000ULL)) size /= 2; cr = (((size - 1) & 0xffff0000) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Set the first free CESA window */ for (j = 0; j < MV_WIN_CESA_MAX; j++) { if (win_cesa_cr_read(base, j) & 0x1) continue; win_cesa_br_write(base, j, br); win_cesa_cr_write(base, j, cr); break; } } } } static void decode_win_a38x_cesa_setup(u_long base) { decode_win_cesa_setup(base); decode_win_cesa_setup(base + MV_WIN_CESA_OFFSET); } static void decode_win_a38x_cesa_dump(u_long base) { decode_win_cesa_dump(base); decode_win_cesa_dump(base + MV_WIN_CESA_OFFSET); } /************************************************************************** * USB windows routines **************************************************************************/ static int decode_win_usb_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_USB_MAX)); } static void decode_win_usb_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port - 1))) return; for (i = 0; i < MV_WIN_USB_MAX; i++) printf("USB window#%d: c 0x%08x, b 0x%08x\n", i, win_usb_cr_read(base, i), win_usb_br_read(base, i)); } /* * Set USB decode windows.
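The USB and SATA setup routines that follow compose their window control word from the same three ingredients (size, attribute, target); a standalone sketch of that encoding, using the shift positions those routines use:

#include <assert.h>
#include <stdint.h>

static uint32_t
mk_win_cr(uint32_t size, uint32_t attr, uint32_t target)
{
	/* (size - 1) in the upper 16 bits, attr at bit 8, target at bit 4,
	 * bit 0 enables the window. */
	return (((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1);
}

int
main(void)
{
	/* A 256 MB bank with attr 0xe and target 0 encodes as 0x0fff0e01. */
	assert(mk_win_cr(0x10000000, 0xe, 0) == 0x0fff0e01);
	return (0);
}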
*/ static void decode_win_usb_setup(u_long base) { uint32_t br, cr; int i, j; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port))) return; usb_port++; for (i = 0; i < MV_WIN_USB_MAX; i++) { win_usb_cr_write(base, i, 0); win_usb_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); /* * XXX for 6281 we should handle Mbus write * burst limit field in the ctrl reg */ cr = (((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1); /* Set the first free USB window */ for (j = 0; j < MV_WIN_USB_MAX; j++) { if (win_usb_cr_read(base, j) & 0x1) continue; win_usb_br_write(base, j, br); win_usb_cr_write(base, j, cr); break; } } } } /************************************************************************** * USB3 windows routines **************************************************************************/ static int decode_win_usb3_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_USB3_MAX)); } static void decode_win_usb3_dump(u_long base) { int i; for (i = 0; i < MV_WIN_USB3_MAX; i++) printf("USB3.0 window#%d: c 0x%08x, b 0x%08x\n", i, win_usb3_cr_read(base, i), win_usb3_br_read(base, i)); } /* * Set USB3 decode windows */ static void decode_win_usb3_setup(u_long base) { uint32_t br, cr; int i, j; for (i = 0; i < MV_WIN_USB3_MAX; i++) { win_usb3_cr_write(base, i, 0); win_usb3_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); cr = (((ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT)) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Set the first free USB3.0 window */ for (j = 0; j < MV_WIN_USB3_MAX; j++) { if (win_usb3_cr_read(base, j) & IO_WIN_ENA_MASK) continue; win_usb3_br_write(base, j, br); win_usb3_cr_write(base, j, cr); break; } } } } /************************************************************************** * ETH windows routines **************************************************************************/ static int win_eth_can_remap(int i) { /* ETH encode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int eth_bare_read(uint32_t base, int i) { uint32_t v; v = win_eth_bare_read(base); v &= (1 << i); return (v >> i); } static void eth_bare_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_bare_read(base); v &= ~(1 << i); v |= (val << i); win_eth_bare_write(base, v); } static void eth_epap_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_epap_read(base); v &= ~(0x3 << (i * 2)); v |= (val << (i * 2)); win_eth_epap_write(base, v); } static void decode_win_eth_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port - 1))) return; for (i = 0; i < MV_WIN_ETH_MAX; i++) { printf("ETH window#%d: b 0x%08x, s 0x%08x", i, win_eth_br_read(base, i), win_eth_sz_read(base, i)); if (win_eth_can_remap(i)) printf(", ha 0x%08x", win_eth_har_read(base, i)); printf("\n"); } printf("ETH windows: bare 0x%08x, epap 0x%08x\n", win_eth_bare_read(base), win_eth_epap_read(base)); } #define MV_WIN_ETH_DDR_TRGT(n) ddr_target(n) static void decode_win_eth_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port))) return; eth_port++; /* Disable, clear and revoke protection for all ETH windows */ for (i = 0; i < MV_WIN_ETH_MAX; i++) { eth_bare_write(base, i, 1); eth_epap_write(base, i, 0); win_eth_br_write(base, i, 0); 
win_eth_sz_write(base, i, 0); if (win_eth_can_remap(i)) win_eth_har_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | MV_WIN_ETH_DDR_TRGT(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Set the first free ETH window */ for (j = 0; j < MV_WIN_ETH_MAX; j++) { if (eth_bare_read(base, j) == 0) continue; win_eth_br_write(base, j, br); win_eth_sz_write(base, j, sz); /* XXX remapping ETH windows not supported */ /* Set protection RW */ eth_epap_write(base, j, 0x3); /* Enable window */ eth_bare_write(base, j, 0); break; } } } static void decode_win_neta_dump(u_long base) { decode_win_eth_dump(base + MV_WIN_NETA_OFFSET); } static void decode_win_neta_setup(u_long base) { decode_win_eth_setup(base + MV_WIN_NETA_OFFSET); } static int decode_win_eth_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_ETH_MAX)); } /************************************************************************** * PCIE windows routines **************************************************************************/ static void decode_win_pcie_dump(u_long base) { int i; printf("PCIE windows base 0x%08lx\n", base); for (i = 0; i < MV_WIN_PCIE_MAX; i++) printf("PCIE window#%d: cr 0x%08x br 0x%08x remap 0x%08x\n", i, win_pcie_cr_read(base, i), win_pcie_br_read(base, i), win_pcie_remap_read(base, i)); for (i = 0; i < MV_PCIE_BAR_MAX; i++) printf("PCIE bar#%d: cr 0x%08x br 0x%08x brh 0x%08x\n", i, pcie_bar_cr_read(base, i), pcie_bar_br_read(base, i), pcie_bar_brh_read(base, i)); } void decode_win_pcie_setup(u_long base) { uint32_t size = 0, ddrbase = ~0; uint32_t cr, br; int i, j; for (i = 0; i < MV_PCIE_BAR_MAX; i++) { pcie_bar_br_write(base, i, MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); if (i < 3) pcie_bar_brh_write(base, i, 0); if (i > 0) pcie_bar_cr_write(base, i, 0); } for (i = 0; i < MV_WIN_PCIE_MAX; i++) { win_pcie_cr_write(base, i, 0); win_pcie_br_write(base, i, 0); win_pcie_remap_write(base, i, 0); } /* On an End-Point, set the BAR size to 1MB only, regardless of DDR size */ if ((bus_space_read_4(fdtbus_bs_tag, base, MV_PCIE_CONTROL) & MV_PCIE_ROOT_CMPLX) == 0) { pcie_bar_cr_write(base, 1, 0xf0000 | 1); return; } for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { /* Map DDR to BAR 1 */ cr = (ddr_size(i) - 1) & 0xffff0000; size += ddr_size(i) & 0xffff0000; cr |= (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); if (br < ddrbase) ddrbase = br; /* Use the first available PCIE window */ for (j = 0; j < MV_WIN_PCIE_MAX; j++) { if (win_pcie_cr_read(base, j) != 0) continue; win_pcie_br_write(base, j, br); win_pcie_cr_write(base, j, cr); break; } } } /* * The upper 16 bits of the BAR register are interpreted as the BAR size * (in 64 kB units) plus 64 kB, so subtract 0x10000 * from the value passed to the register to get the correct value.
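A worked sketch of that BAR-size encoding: for a hypothetical total DDR size of 512 MB, the value written to the BAR control register is (512 MB - 64 KB) in the upper bits, with bit 0 as the enable.

#include <assert.h>
#include <stdint.h>

static uint32_t
pcie_bar_cr_value(uint32_t ddr_total)
{
	return ((ddr_total - 0x10000) | 1);	/* subtract 64 KB, set enable */
}

int
main(void)
{
	assert(pcie_bar_cr_value(0x20000000) == 0x1fff0001);
	return (0);
}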
*/ size -= 0x10000; pcie_bar_cr_write(base, 1, size | 1); pcie_bar_br_write(base, 1, ddrbase | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); pcie_bar_br_write(base, 0, fdt_immr_pa | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); } static int decode_win_pcie_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_PCIE_MAX)); } /************************************************************************** * IDMA windows routines **************************************************************************/ #if defined(SOC_MV_ORION) || defined(SOC_MV_DISCOVERY) static int idma_bare_read(u_long base, int i) { uint32_t v; v = win_idma_bare_read(base); v &= (1 << i); return (v >> i); } static void idma_bare_write(u_long base, int i, int val) { uint32_t v; v = win_idma_bare_read(base); v &= ~(1 << i); v |= (val << i); win_idma_bare_write(base, v); } /* * Sets channel protection 'val' for window 'w' on channel 'c' */ static void idma_cap_write(u_long base, int c, int w, int val) { uint32_t v; v = win_idma_cap_read(base, c); v &= ~(0x3 << (w * 2)); v |= (val << (w * 2)); win_idma_cap_write(base, c, v); } /* * Set protection 'val' on all channels for window 'w' */ static void idma_set_prot(u_long base, int w, int val) { int c; for (c = 0; c < MV_IDMA_CHAN_MAX; c++) idma_cap_write(base, c, w, val); } static int win_idma_can_remap(int i) { /* IDMA decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } void decode_win_idma_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; /* * Disable and clear all IDMA windows, revoke protection for all channels */ for (i = 0; i < MV_WIN_IDMA_MAX; i++) { idma_bare_write(base, i, 1); win_idma_br_write(base, i, 0); win_idma_sz_write(base, i, 0); if (win_idma_can_remap(i) == 1) win_idma_har_write(base, i, 0); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) win_idma_cap_write(base, i, 0); /* * Set up access to all active DRAM banks */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) if (win_idma_can_remap(j) != 1 && idma_bare_read(base, j) == 1) { /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); /* Set protection RW on all channels */ idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } /* * Remaining targets -- from statically defined table */ for (i = 0; i < idma_wins_no; i++) if (idma_wins[i].target > 0) { br = (idma_wins[i].base & 0xffff0000) | (idma_wins[i].attr << 8) | idma_wins[i].target; sz = ((idma_wins[i].size - 1) & 0xffff0000); /* Set the first free IDMA window */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) { if (idma_bare_read(base, j) == 0) continue; /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); if (win_idma_can_remap(j) && idma_wins[j].remap >= 0) win_idma_har_write(base, j, idma_wins[j].remap); /* Set protection RW on all channels */ idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } } int decode_win_idma_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (idma_wins_no > MV_WIN_IDMA_MAX) { printf("IDMA windows: too many entries: %d\n", idma_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (idma_wins_no > (MV_WIN_IDMA_MAX - c)) { printf("IDMA windows: too many entries: %d, available: %d\n", 
idma_wins_no, MV_WIN_IDMA_MAX - c); return (0); } wintab = idma_wins; rv = 1; for (i = 0; i < idma_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("IDMA window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("IDMA window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* XXX this boundary check should account for 64bit and * remapping.. */ printf("IDMA window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, idma_wins_no, &idma_wins[0]); if (j >= 0) { printf("IDMA window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, idma_wins[j].base, idma_wins[j].base + idma_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_idma_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; for (i = 0; i < MV_WIN_IDMA_MAX; i++) { printf("IDMA window#%d: b 0x%08x, s 0x%08x", i, win_idma_br_read(base, i), win_idma_sz_read(base, i)); if (win_idma_can_remap(i)) printf(", ha 0x%08x", win_idma_har_read(base, i)); printf("\n"); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) printf("IDMA channel#%d: ap 0x%08x\n", i, win_idma_cap_read(base, i)); printf("IDMA windows: bare 0x%08x\n", win_idma_bare_read(base)); } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with IDMA */ int decode_win_idma_valid(void) { return (1); } void decode_win_idma_setup(u_long base) { } void decode_win_idma_dump(u_long base) { } #endif /************************************************************************** * XOR windows routines **************************************************************************/ #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) static int xor_ctrl_read(u_long base, int i, int c, int e) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= (1 << i); return (v >> i); } static void xor_ctrl_write(u_long base, int i, int c, int e, int val) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(1 << i); v |= (val << i); win_xor_ctrl_write(base, c, e, v); } /* * Set channel protection 'val' for window 'w' on channel 'c' */ static void xor_chan_write(u_long base, int c, int e, int w, int val) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(0x3 << (w * 2 + 16)); v |= (val << (w * 2 + 16)); win_xor_ctrl_write(base, c, e, v); } /* * Set protection 'val' on all channels for window 'w' on engine 'e' */ static void xor_set_prot(u_long base, int w, int e, int val) { int c; for (c = 0; c < MV_XOR_CHAN_MAX; c++) xor_chan_write(base, c, e, w, val); } static int win_xor_can_remap(int i) { /* XOR decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int xor_max_eng(void) { uint32_t dev, rev; soc_id(&dev, &rev); switch (dev) { case MV_DEV_88F6281: case MV_DEV_88F6282: case MV_DEV_MV78130: case MV_DEV_MV78160: case MV_DEV_MV78230: case MV_DEV_MV78260: case MV_DEV_MV78460: return (2); case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: return (1); default: return (0); } } static void xor_active_dram(u_long base, int c, int e, int *window) { uint32_t br, sz; int i, m, w; /* * Set up access to all active DRAM banks */ m = xor_max_eng(); for (i = 0; i < m; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (w = 0; w < 
MV_WIN_XOR_MAX; w++) if (win_xor_can_remap(w) != 1 && (xor_ctrl_read(base, w, c, e) == 0) && w > *window) { /* Configure window */ win_xor_br_write(base, w, e, br); win_xor_sz_write(base, w, e, sz); /* Set protection RW on all channels */ xor_set_prot(base, w, e, 0x3); /* Enable window */ xor_ctrl_write(base, w, c, e, 1); (*window)++; break; } } } void decode_win_xor_setup(u_long base) { uint32_t br, sz; int i, j, z, e = 1, m, window; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; /* * Disable and clear all XOR windows, revoke protection for all * channels */ m = xor_max_eng(); for (j = 0; j < m; j++, e--) { /* Number of non-remapped windows */ window = MV_XOR_NON_REMAP - 1; for (i = 0; i < MV_WIN_XOR_MAX; i++) { win_xor_br_write(base, i, e, 0); win_xor_sz_write(base, i, e, 0); if (win_xor_can_remap(i) == 1) win_xor_har_write(base, i, e, 0); } for (i = 0; i < MV_XOR_CHAN_MAX; i++) { win_xor_ctrl_write(base, i, e, 0); xor_active_dram(base, i, e, &window); } /* * Remaining targets -- from a statically defined table */ for (i = 0; i < xor_wins_no; i++) if (xor_wins[i].target > 0) { br = (xor_wins[i].base & 0xffff0000) | (xor_wins[i].attr << 8) | xor_wins[i].target; sz = ((xor_wins[i].size - 1) & 0xffff0000); /* Set the first free XOR window */ for (z = 0; z < MV_WIN_XOR_MAX; z++) { if (xor_ctrl_read(base, z, 0, e) && xor_ctrl_read(base, z, 1, e)) continue; /* Configure window */ win_xor_br_write(base, z, e, br); win_xor_sz_write(base, z, e, sz); if (win_xor_can_remap(z) && xor_wins[z].remap >= 0) win_xor_har_write(base, z, e, xor_wins[z].remap); /* Set protection RW on all channels */ xor_set_prot(base, z, e, 0x3); /* Enable window */ xor_ctrl_write(base, z, 0, e, 1); xor_ctrl_write(base, z, 1, e, 1); break; } } } } int decode_win_xor_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (xor_wins_no > MV_WIN_XOR_MAX) { printf("XOR windows: too many entries: %d\n", xor_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (xor_wins_no > (MV_WIN_XOR_MAX - c)) { printf("XOR windows: too many entries: %d, available: %d\n", xor_wins_no, MV_WIN_XOR_MAX - c); return (0); } wintab = xor_wins; rv = 1; for (i = 0; i < xor_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("XOR window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("XOR window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* * XXX this boundary check should account for 64bit * and remapping..
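The boundary test above, s > (0xFFFFFFFF - b + 1), is an overflow-avoiding way of asking whether base + size still fits in the 32-bit address space. A wrap-free 64-bit formulation of the same check, shown standalone (it also behaves for base == 0, where the 32-bit expression itself wraps):

#include <stdbool.h>
#include <stdint.h>

static bool
win_fits_32bit_space(uint32_t base, uint32_t size)
{
	/* Promote to 64 bits so base + size cannot wrap around. */
	return ((uint64_t)base + size <= 0x100000000ULL);
}

int
main(void)
{
	/* A 1 MB window at 0xfff00000 ends exactly at the 4 GB boundary. */
	return (win_fits_32bit_space(0xfff00000u, 0x00100000u) ? 0 : 1);
}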
*/ printf("XOR window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, xor_wins_no, &xor_wins[0]); if (j >= 0) { printf("XOR window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, xor_wins[j].base, xor_wins[j].base + xor_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_xor_dump(u_long base) { int i, j; int e = 1; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; for (j = 0; j < xor_max_eng(); j++, e--) { for (i = 0; i < MV_WIN_XOR_MAX; i++) { printf("XOR window#%d: b 0x%08x, s 0x%08x", i, win_xor_br_read(base, i, e), win_xor_sz_read(base, i, e)); if (win_xor_can_remap(i)) printf(", ha 0x%08x", win_xor_har_read(base, i, e)); printf("\n"); } for (i = 0; i < MV_XOR_CHAN_MAX; i++) printf("XOR control#%d: 0x%08x\n", i, win_xor_ctrl_read(base, i, e)); } } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with XOR */ static int decode_win_xor_valid(void) { return (1); } static void decode_win_xor_setup(u_long base) { } static void decode_win_xor_dump(u_long base) { } #endif /************************************************************************** * SATA windows routines **************************************************************************/ static void decode_win_sata_setup(u_long base) { uint32_t cr, br; int i, j; if (pm_is_disabled(CPU_PM_CTRL_SATA)) return; for (i = 0; i < MV_WIN_SATA_MAX; i++) { win_sata_cr_write(base, i, 0); win_sata_br_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { cr = ((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); /* Use the first available SATA window */ for (j = 0; j < MV_WIN_SATA_MAX; j++) { if ((win_sata_cr_read(base, j) & 1) != 0) continue; win_sata_br_write(base, j, br); win_sata_cr_write(base, j, cr); break; } } } /* * Configure AHCI decoding windows */ static void decode_win_ahci_setup(u_long base) { uint32_t br, cr, sz; int i, j; for (i = 0; i < MV_WIN_SATA_MAX_ARMADA38X; i++) { win_sata_armada38x_cr_write(base, i, 0); win_sata_armada38x_br_write(base, i, 0); win_sata_armada38x_sz_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { cr = (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK; br = ddr_base(i); sz = (ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT); /* Use first available SATA window */ for (j = 0; j < MV_WIN_SATA_MAX_ARMADA38X; j++) { if (win_sata_armada38x_cr_read(base, j) & IO_WIN_ENA_MASK) continue; /* BASE is set to DRAM base (0x00000000) */ win_sata_armada38x_br_write(base, j, br); /* CTRL targets DRAM ctrl with 0x0E or 0x0D */ win_sata_armada38x_cr_write(base, j, cr); /* SIZE is set to 16MB - max value */ win_sata_armada38x_sz_write(base, j, sz); break; } } } } static void decode_win_ahci_dump(u_long base) { int i; for (i = 0; i < MV_WIN_SATA_MAX_ARMADA38X; i++) printf("SATA window#%d: cr 0x%08x, br 0x%08x, sz 0x%08x\n", i, win_sata_armada38x_cr_read(base, i), win_sata_br_read(base, i), win_sata_armada38x_sz_read(base,i)); } static int decode_win_sata_valid(void) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88F5281) return (1); return (decode_win_can_cover_ddr(MV_WIN_SATA_MAX)); } static void decode_win_sdhci_setup(u_long base) { uint32_t cr, br; int i, j; for (i = 0; i < MV_WIN_SDHCI_MAX; i++) { win_sdhci_cr_write(base, i, 0); win_sdhci_br_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i); cr = 
(((ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT)) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Use the first available SDHCI window */ for (j = 0; j < MV_WIN_SDHCI_MAX; j++) { if (win_sdhci_cr_read(base, j) & IO_WIN_ENA_MASK) continue; win_sdhci_cr_write(base, j, cr); win_sdhci_br_write(base, j, br); break; } } } static void decode_win_sdhci_dump(u_long base) { int i; for (i = 0; i < MV_WIN_SDHCI_MAX; i++) printf("SDHCI window#%d: c 0x%08x, b 0x%08x\n", i, win_sdhci_cr_read(base, i), win_sdhci_br_read(base, i)); } static int decode_win_sdhci_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_SDHCI_MAX)); } /************************************************************************** * FDT parsing routines. **************************************************************************/ static int fdt_get_ranges(const char *nodename, void *buf, int size, int *tuples, int *tuplesize) { phandle_t node; pcell_t addr_cells, par_addr_cells, size_cells; int len, tuple_size, tuples_count; node = OF_finddevice(nodename); if (node == -1) return (EINVAL); if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (ENXIO); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) return (ERANGE); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); /* Note the OF_getprop_alloc() cannot be used at this early stage. */ len = OF_getprop(node, "ranges", buf, size); /* * XXX this does not handle the empty 'ranges;' case, which is * legitimate and should be allowed. */ tuples_count = len / tuple_size; if (tuples_count <= 0) return (ERANGE); if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2) return (ERANGE); *tuples = tuples_count; *tuplesize = tuple_size; return (0); } static int win_cpu_from_dt(void) { pcell_t ranges[48]; phandle_t node; int i, entry_size, err, t, tuple_size, tuples; u_long sram_base, sram_size; t = 0; /* Retrieve 'ranges' property of '/localbus' node. */ if ((err = fdt_get_ranges("/localbus", ranges, sizeof(ranges), &tuples, &tuple_size)) == 0) { /* * Fill CPU decode windows table. */ bzero((void *)&cpu_win_tbl, sizeof(cpu_win_tbl)); entry_size = tuple_size / sizeof(pcell_t); cpu_wins_no = tuples; /* Check range */ if (tuples > nitems(cpu_win_tbl)) { debugf("too many tuples to fit into cpu_win_tbl\n"); return (ENOMEM); } for (i = 0, t = 0; t < tuples; i += entry_size, t++) { cpu_win_tbl[t].target = 1; cpu_win_tbl[t].attr = fdt32_to_cpu(ranges[i + 1]); cpu_win_tbl[t].base = fdt32_to_cpu(ranges[i + 2]); cpu_win_tbl[t].size = fdt32_to_cpu(ranges[i + 3]); cpu_win_tbl[t].remap = ~0; debugf("target = 0x%0x attr = 0x%0x base = 0x%0x " "size = 0x%0x remap = 0x%0x\n", cpu_win_tbl[t].target, cpu_win_tbl[t].attr, cpu_win_tbl[t].base, cpu_win_tbl[t].size, cpu_win_tbl[t].remap); } } /* * Retrieve CESA SRAM data. */ if ((node = OF_finddevice("sram")) != -1) if (ofw_bus_node_is_compatible(node, "mrvl,cesa-sram")) goto moveon; if ((node = OF_finddevice("/")) == -1) return (ENXIO); if ((node = fdt_find_compatible(node, "mrvl,cesa-sram", 0)) == 0) /* SRAM block is not always present. 
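The tuple arithmetic in fdt_get_ranges() above is worth spelling out: each 'ranges' entry holds a child address, a parent address and a size, so the byte size of one entry follows directly from the three #cells values. A standalone sketch:

#include <assert.h>
#include <stdint.h>

typedef uint32_t pcell_t;

static int
ranges_tuple_count(int len, int addr_cells, int par_addr_cells, int size_cells)
{
	int tuple_size;

	tuple_size = (int)sizeof(pcell_t) *
	    (addr_cells + par_addr_cells + size_cells);
	return (tuple_size > 0 ? len / tuple_size : 0);
}

int
main(void)
{
	/* 2 tuples of (1 + 1 + 1) cells each: 24 bytes total. */
	assert(ranges_tuple_count(24, 1, 1, 1) == 2);
	return (0);
}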
*/ return (0); moveon: sram_base = sram_size = 0; if (fdt_regsize(node, &sram_base, &sram_size) != 0) return (EINVAL); /* Check range */ if (t >= nitems(cpu_win_tbl)) { debugf("cannot fit CESA tuple into cpu_win_tbl\n"); return (ENOMEM); } cpu_win_tbl[t].target = soc_decode_win_spec->win_cesa_target; if (soc_family == MV_SOC_ARMADA_38X) cpu_win_tbl[t].attr = soc_decode_win_spec->win_cesa_attr(0); else cpu_win_tbl[t].attr = soc_decode_win_spec->win_cesa_attr(1); cpu_win_tbl[t].base = sram_base; cpu_win_tbl[t].size = sram_size; cpu_win_tbl[t].remap = ~0; cpu_wins_no++; debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size); /* Check if there is a second CESA node */ while ((node = OF_peer(node)) != 0) { if (ofw_bus_node_is_compatible(node, "mrvl,cesa-sram")) { if (fdt_regsize(node, &sram_base, &sram_size) != 0) return (EINVAL); break; } } if (node == 0) return (0); t++; if (t >= nitems(cpu_win_tbl)) { debugf("cannot fit CESA tuple into cpu_win_tbl\n"); return (ENOMEM); } /* Configure window for CESA1 */ cpu_win_tbl[t].target = soc_decode_win_spec->win_cesa_target; cpu_win_tbl[t].attr = soc_decode_win_spec->win_cesa_attr(1); cpu_win_tbl[t].base = sram_base; cpu_win_tbl[t].size = sram_size; cpu_win_tbl[t].remap = ~0; cpu_wins_no++; debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size); return (0); } static int fdt_win_process(phandle_t child) { int i, ret; for (i = 0; soc_nodes[i].compat != NULL; i++) { /* Setup only for enabled devices */ if (ofw_bus_node_status_okay(child) == 0) continue; if (!ofw_bus_node_is_compatible(child, soc_nodes[i].compat)) continue; ret = fdt_win_process_child(child, &soc_nodes[i], "reg"); if (ret != 0) return (ret); } return (0); } static int fdt_win_process_child(phandle_t child, struct soc_node_spec *soc_node, const char* mimo_reg_source) { int addr_cells, size_cells; pcell_t reg[8]; u_long size, base; if (fdt_addrsize_cells(OF_parent(child), &addr_cells, &size_cells)) return (ENXIO); if ((sizeof(pcell_t) * (addr_cells + size_cells)) > sizeof(reg)) return (ENOMEM); if (OF_getprop(child, mimo_reg_source, &reg, sizeof(reg)) <= 0) return (EINVAL); if (addr_cells <= 2) base = fdt_data_get(&reg[0], addr_cells); else base = fdt_data_get(&reg[addr_cells - 2], 2); size = fdt_data_get(&reg[addr_cells], size_cells); if (soc_node->valid_handler != NULL) if (!soc_node->valid_handler()) return (EINVAL); base = (base & 0x000fffff) | fdt_immr_va; if (soc_node->decode_handler != NULL) soc_node->decode_handler(base); else return (ENXIO); if (MV_DUMP_WIN && (soc_node->dump_handler != NULL)) soc_node->dump_handler(base); return (0); } static int fdt_win_setup(void) { phandle_t node, child, sb; phandle_t child_pci; int err; sb = 0; node = OF_finddevice("/"); if (node == -1) panic("fdt_win_setup: no root node"); /* Allow for coherent transactions on the A38x MBUS */ if (ofw_bus_node_is_compatible(node, "marvell,armada380")) platform_io_coherent = true; /* * Traverse through all children of root and simple-bus nodes. * For each found device retrieve decode windows data (if applicable).
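The address rebasing in fdt_win_process_child() above keeps only the offset within the internal-registers block and substitutes the block's virtual base. A worked sketch (the fdt_immr_va and 'reg' values here are hypothetical):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t fdt_immr_va = 0xf1000000;	/* assumed VA of internal regs */
	uint32_t reg_pa = 0xd0030000;		/* hypothetical 'reg' address */
	uint32_t base = (reg_pa & 0x000fffff) | fdt_immr_va;

	assert(base == 0xf1030000);
	return (0);
}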
*/ child = OF_child(node); while (child != 0) { /* Look up the callback and run it */ err = fdt_win_process(child); if (err != 0) return (err); /* Process Marvell Armada-XP/38x PCIe controllers */ if (ofw_bus_node_is_compatible(child, "marvell,armada-370-pcie")) { child_pci = OF_child(child); while (child_pci != 0) { err = fdt_win_process_child(child_pci, &soc_nodes[SOC_NODE_PCIE_ENTRY_IDX], "assigned-addresses"); if (err != 0) return (err); child_pci = OF_peer(child_pci); } } /* * Once done with root-level children let's move down to * simple-bus and its children. */ child = OF_peer(child); if ((child == 0) && (node == OF_finddevice("/"))) { sb = node = fdt_find_compatible(node, "simple-bus", 0); if (node == 0) return (ENXIO); child = OF_child(node); } /* * Next, move one more level down to the internal-regs node (if * it is present) and its children. This node also has the * "simple-bus" compatible string. */ if ((child == 0) && (node == sb)) { node = fdt_find_compatible(node, "simple-bus", 0); if (node == 0) return (0); child = OF_child(node); } } return (0); } static void fdt_fixup_busfreq(phandle_t root) { phandle_t sb; pcell_t freq; freq = cpu_to_fdt32(get_tclk()); /* * Fix bus speed in cpu node */ if ((sb = OF_finddevice("cpu")) != -1) if (fdt_is_compatible_strict(sb, "ARM,88VS584")) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); /* * This fixup sets the simple-bus bus-frequency property. */ if ((sb = fdt_find_compatible(root, "simple-bus", 1)) != 0) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); } static void fdt_fixup_ranges(phandle_t root) { phandle_t node; pcell_t par_addr_cells, addr_cells, size_cells; pcell_t ranges[3], reg[2], *rangesptr; int len, tuple_size, tuples_count; uint32_t base; /* Fix-up SoC ranges according to real fdt_immr_pa */ if ((node = fdt_find_compatible(root, "simple-bus", 1)) != 0) { if (fdt_addrsize_cells(node, &addr_cells, &size_cells) == 0 && ((par_addr_cells = fdt_parent_addr_cells(node)) <= 2)) { tuple_size = sizeof(pcell_t) * (par_addr_cells + addr_cells + size_cells); len = OF_getprop(node, "ranges", ranges, sizeof(ranges)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; rangesptr = &ranges[0]; rangesptr += par_addr_cells; base = fdt_data_get((void *)rangesptr, addr_cells); *rangesptr = cpu_to_fdt32(fdt_immr_pa); if (OF_setprop(node, "ranges", (void *)&ranges[0], sizeof(ranges)) < 0) goto fixup_failed; } } /* Fix-up PCIe reg according to real PCIe registers' PA */ if ((node = fdt_find_compatible(root, "mrvl,pcie", 1)) != 0) { if (fdt_addrsize_cells(OF_parent(node), &par_addr_cells, &size_cells) == 0) { tuple_size = sizeof(pcell_t) * (par_addr_cells + size_cells); len = OF_getprop(node, "reg", reg, sizeof(reg)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; base = fdt_data_get((void *)&reg[0], par_addr_cells); base &= ~0xFF000000; base |= fdt_immr_pa; reg[0] = cpu_to_fdt32(base); if (OF_setprop(node, "reg", (void *)&reg[0], sizeof(reg)) < 0) goto fixup_failed; } } /* Fix-up succeeded. May return and continue */ return; fixup_failed: while (1) { /* * In case of any error while fixing ranges just hang. * 1. No message can be displayed yet since the console * is not initialized. * 2. Going further will cause failure on bus_space_map() * relying on the wrong ranges, or a data abort when * accessing PCIe registers.
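fdt_fixup_busfreq() above goes through cpu_to_fdt32() because FDT cells are stored big-endian; on a little-endian ARM core that conversion is a byte swap. A standalone sketch of what such a swap does (this is illustrative, not the kernel's implementation, which is a no-op on big-endian hosts):

#include <assert.h>
#include <stdint.h>

static uint32_t
bswap32_sketch(uint32_t v)
{
	return (((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	    ((v & 0x00ff0000u) >> 8) | ((v & 0xff000000u) >> 24));
}

int
main(void)
{
	/* A 200 MHz TCLK, 0x0bebc200, becomes 0x00c2eb0b in cell byte order. */
	assert(bswap32_sketch(0x0bebc200u) == 0x00c2eb0bu);
	return (0);
}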
*/ } } struct fdt_fixup_entry fdt_fixup_table[] = { { "mrvl,DB-88F6281", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_ranges }, { NULL, NULL } }; -#if __ARM_ARCH >= 6 uint32_t get_tclk(void) { if (soc_decode_win_spec->get_tclk != NULL) return soc_decode_win_spec->get_tclk(); else return -1; } uint32_t get_cpu_freq(void) { if (soc_decode_win_spec->get_cpu_freq != NULL) return soc_decode_win_spec->get_cpu_freq(); else return -1; } -#endif - -#ifndef INTRNG -static int -fdt_pic_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, - int *pol) -{ - - if (!ofw_bus_node_is_compatible(node, "mrvl,pic") && - !ofw_bus_node_is_compatible(node, "mrvl,mpic")) - return (ENXIO); - - *interrupt = fdt32_to_cpu(intr[0]); - *trig = INTR_TRIGGER_CONFORM; - *pol = INTR_POLARITY_CONFORM; - - return (0); -} - -fdt_pic_decode_t fdt_pic_table[] = { - &fdt_pic_decode_ic, - NULL -}; -#endif Index: head/sys/arm/mv/mv_pci.c =================================================================== --- head/sys/arm/mv/mv_pci.c (revision 368140) +++ head/sys/arm/mv/mv_pci.c (revision 368141) @@ -1,1288 +1,1284 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2015 Semihalf * All rights reserved. * * Developed by Semihalf. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif /* * Code and data related to fdt-based PCI configuration. 
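 */

/*
 * Illustrative sketch, not part of this change: the OFW PCI address
 * encoding that mv_pci_ranges_decode() below keys on.  Bit 25 of the
 * phys.hi cell marks 32-bit memory space and bit 24 marks I/O space;
 * the helper name and its return convention are made up for the
 * example.
 */
static inline int
example_pci_space_type(pcell_t cell0)
{

	if (cell0 & 0x02000000)
		return (0);	/* memory space */
	if (cell0 & 0x01000000)
		return (1);	/* I/O space */
	return (-1);		/* other spaces are rejected by the decoder */
}

/*
 * Code and data related to fdt-based PCI configuration, continued.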
* * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was * always Marvell-specific so that was deleted and the code now lives here. */ struct mv_pci_range { u_long base_pci; u_long base_parent; u_long len; }; #define FDT_RANGES_CELLS ((3 + 3 + 2) * 2) #define PCI_SPACE_LEN 0x00400000 static void mv_pci_range_dump(struct mv_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base_pci = 0x%08lx\n", range->base_pci); printf(" base_par = 0x%08lx\n", range->base_parent); printf(" len = 0x%08lx\n", range->len); #endif } static int mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { pcell_t ranges[FDT_RANGES_CELLS]; struct mv_pci_range *pci_space; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell1, cell2; int tuple_size, tuples, i, rv, offset_cells, len; int portid, is_io_space; /* * Retrieve 'ranges' property. */ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 3) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI buses. */ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; offset_cells = 0; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell1 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; portid = fdt_data_get((void *)(rangesptr+1), 1); if (cell0 & 0x02000000) { pci_space = mem_space; is_io_space = 0; } else if (cell0 & 0x01000000) { pci_space = io_space; is_io_space = 1; } else { rv = ERANGE; goto out; } if (par_addr_cells == 3) { /* * This is a PCI subnode 'ranges'. Skip cell0 and * cell1 of this entry and only use cell2. */ offset_cells = 2; rangesptr += offset_cells; } if ((par_addr_cells - offset_cells) > 2) { rv = ERANGE; goto out; } pci_space->base_parent = fdt_data_get((void *)rangesptr, par_addr_cells - offset_cells); rangesptr += par_addr_cells - offset_cells; if (size_cells > 2) { rv = ERANGE; goto out; } pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; pci_space->base_pci = cell2; if (pci_space->len == 0) { pci_space->len = PCI_SPACE_LEN; pci_space->base_parent = fdt_immr_va + PCI_SPACE_LEN * ( 2 * portid + is_io_space); } } rv = 0; out: return (rv); } static int mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { int err; debugf("Processing PCI node: %x\n", node); if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) { debugf("could not decode parent PCI node 'ranges'\n"); return (err); } debugf("Post fixup dump:\n"); mv_pci_range_dump(io_space); mv_pci_range_dump(mem_space); return (0); } int mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { struct mv_pci_range io_space, mem_space; int error; if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0) return (error); devmap->pd_va = (io_va ? 
io_va : io_space.base_parent); devmap->pd_pa = io_space.base_parent; devmap->pd_size = io_space.len; devmap++; devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent); devmap->pd_pa = mem_space.base_parent; devmap->pd_size = mem_space.len; return (0); } /* * Code and data related to the Marvell pcib driver. */ #define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ int sc_msi_supported; int sc_skip_enable_procedure; int sc_enable_find_root_slot; struct ofw_bus_iinfo sc_pci_iinfo; int ap_segment; /* PCI domain */ }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int mv_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, 
uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); /* * Bus interface definitions. */ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; devclass_t pcib_devclass; DRIVER_MODULE(mv_pcib, ofwbus, mv_pcib_driver, pcib_devclass, 0, 0); DRIVER_MODULE(mv_pcib, pcib_ctrl, mv_pcib_driver, pcib_devclass, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!mv_fdt_is_type(node, "pci")) return (ENXIO); if (!(ofw_bus_is_compatible(self, "mrvl,pcie") || ofw_bus_is_compatible(self, "mrvl,pci") || ofw_bus_node_is_compatible( OF_parent(node), "marvell,armada-370-pcie"))) return (ENXIO); if (!ofw_bus_status_okay(self)) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, reg0; int err, bus, devfn, port_id; sc = device_get_softc(self); sc->sc_dev = self; node = ofw_bus_get_node(self); parnode = OF_parent(node); if (OF_getencprop(node, "marvell,pcie-port", &(port_id), sizeof(port_id)) <= 0) { /* If port ID does not exist in the FDT set value to 0 */ if (!OF_hasprop(node, "marvell,pcie-port")) port_id = 0; else return(ENXIO); } sc->ap_segment = port_id; if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id); -#if __ARM_ARCH >= 6 sc->sc_skip_enable_procedure = 1; -#endif } else if (ofw_bus_node_is_compatible(parnode, "marvell,armada-370-pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET_ARMADA38X(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR_ARMADA38X(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR_ARMADA38X(port_id); sc->sc_enable_find_root_slot = 1; } else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else 
return (ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if (sc->sc_mode == MV_MODE_ROOT) ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t)); /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, port_id); /* * Memory management. */ err = mv_pcib_mem_init(sc); if (err) return (err); /* * Preliminary bus enumeration to find the first linked devices and set * the appropriate bus number from which the actual enumeration should start. */ for (bus = 0; bus < PCI_BUSMAX; bus++) { for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) { reg0 = mv_pcib_read_config(self, bus, devfn, devfn & 0x7, 0x0, 4); if (reg0 == (~0U)) continue; /* no device */ else { sc->sc_busnr = bus; /* update bus number */ break; } } } if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; int timeout; if (sc->sc_skip_enable_procedure) goto pcib_enable_root_mode; /* * Check if PCIE device is enabled. */ if ((sc->sc_skip_enable_procedure == 0) && (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit))) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } pcib_enable_root_mode: if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management.
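 *
 * Two rman arenas cover the PCI(E) decode windows configured earlier,
 * while the sc_mem_map/sc_io_map bitmaps track the init-time BAR
 * carving performed by pcib_alloc().  As a worked example (the BAR
 * size is hypothetical), each bit of sc_mem_map stands for
 * PCI_MIN_MEM_ALLOC (16) bytes, so a 0x100-byte BAR claims
 * 0x100 / 16 = 16 consecutive bits.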
*/ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). */ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 
2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 
0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); struct rman *rm = NULL; struct resource *res; switch (type) { case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); } if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_bustag(res, fdtbus_bs_tag); rman_set_bushandle(res, start); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res)) { rman_release_resource(res); return (NULL); } return (res); } static int mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_release_bus(sc->ap_segment, child, rid, res)); #endif if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, res)); return (rman_release_resource(res)); } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCIE) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? 
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static int mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func) { struct mv_pcib_softc *sc = device_get_softc(dev); uint32_t vendor, device; /* On platforms other than Armada38x, root link is always at slot 0 */ if (!sc->sc_enable_find_root_slot) return (slot == 0); vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR, PCIR_VENDOR_LENGTH); device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE, PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK; return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X); } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct mv_pcib_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[4]; int icells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(&reg, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (icells > 0) return
(ofw_bus_map_intr(dev, iparent, icells, mintr)); /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct mv_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } -#if __ARM_ARCH >= 6 mv_msi_data(irq, addr, data); -#endif debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); *irqs++ = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); if(!sc->sc_msi_supported) return (ENOTSUP); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } Index: head/sys/arm/versatile/versatile_common.c =================================================================== --- head/sys/arm/versatile/versatile_common.c (revision 368140) +++ head/sys/arm/versatile/versatile_common.c (nonexistent) @@ -1,72 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-3-Clause - * - * Copyright (C) 2008-2011 MARVELL INTERNATIONAL LTD. - * All rights reserved. - * - * Developed by Semihalf. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of MARVELL nor the names of contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - -#ifndef INTRNG -static int -fdt_intc_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, - int *pol) -{ - - if (!ofw_bus_node_is_compatible(node, "arm,versatile-vic")) - return (ENXIO); - - *interrupt = fdt32_to_cpu(intr[0]); - *trig = INTR_TRIGGER_CONFORM; - *pol = INTR_POLARITY_CONFORM; - - return (0); -} - -fdt_pic_decode_t fdt_pic_table[] = { - &fdt_intc_decode_ic, - NULL -}; -#endif Property changes on: head/sys/arm/versatile/versatile_common.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/arm/versatile/files.versatile =================================================================== --- head/sys/arm/versatile/files.versatile (revision 368140) +++ head/sys/arm/versatile/files.versatile (revision 368141) @@ -1,10 +1,9 @@ # $FreeBSD$ arm/versatile/pl050.c optional sc arm/versatile/sp804.c standard arm/versatile/versatile_machdep.c standard arm/versatile/versatile_clcd.c optional sc -arm/versatile/versatile_common.c standard arm/versatile/versatile_pci.c optional pci arm/versatile/versatile_scm.c standard arm/versatile/versatile_sic.c standard