Page Menu · Home · FreeBSD

No One · Temporary

File Metadata

Created
Wed, Nov 20, 2:15 AM
This file is larger than 256 KB, so syntax highlighting was skipped.
Index: head/sys/arm/allwinner/aw_wdog.c
===================================================================
--- head/sys/arm/allwinner/aw_wdog.c (revision 327172)
+++ head/sys/arm/allwinner/aw_wdog.c (revision 327173)
@@ -1,275 +1,272 @@
/*-
* Copyright (c) 2013 Oleksandr Tymoshenko <gonzo@freebsd.org>
* Copyright (c) 2016 Emmanuel Vadot <manu@bidouilliste.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/watchdog.h>
#include <sys/reboot.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include <machine/machdep.h>
#include <arm/allwinner/aw_wdog.h>
/* Convenience accessors for the watchdog register window. */
#define	READ(_sc, _r) bus_read_4((_sc)->res, (_r))
#define	WRITE(_sc, _r, _v) bus_write_4((_sc)->res, (_r), (_v))

/* Control register: offsets differ between the A10 and A31 layouts. */
#define	A10_WDOG_CTRL		0x00
#define	A31_WDOG_CTRL		0x10
#define	WDOG_CTRL_RESTART	(1 << 0)	/* pet/restart the watchdog */
#define	A31_WDOG_CTRL_KEY	(0xa57 << 1)	/* A31 write-enable key */

/* Mode register: enable bit and timeout-interval field. */
#define	A10_WDOG_MODE		0x04
#define	A31_WDOG_MODE		0x18
#define	A10_WDOG_MODE_INTVL_SHIFT	3
#define	A31_WDOG_MODE_INTVL_SHIFT	4
#define	A10_WDOG_MODE_RST_EN	(1 << 1)	/* A10: reset system on expiry */
#define	WDOG_MODE_EN		(1 << 0)

/* A31-only config register selecting what a watchdog expiry triggers. */
#define	A31_WDOG_CONFIG		0x14
#define	A31_WDOG_CONFIG_RST_EN_SYSTEM	(1 << 0)
#define	A31_WDOG_CONFIG_RST_EN_INT	(2 << 0)

/* Mapping from a timeout in milliseconds to the raw interval-field code. */
struct aw_wdog_interval {
	uint64_t	milliseconds;
	unsigned int	value;	/* raw value for the MODE interval field */
};

/* Supported hardware timeouts, ascending; 16 s is the hardware maximum. */
struct aw_wdog_interval wd_intervals[] = {
	{ 500, 0 },
	{ 1000, 1 },
	{ 2000, 2 },
	{ 3000, 3 },
	{ 4000, 4 },
	{ 5000, 5 },
	{ 6000, 6 },
	{ 8000, 7 },
	{ 10000, 8 },
	{ 12000, 9 },
	{ 14000, 10 },
	{ 16000, 11 },
	{ 0, 0 } /* sentinel */
};

/* Singleton softc, consumed by aw_wdog_watchdog_reset() on reset paths. */
static struct aw_wdog_softc *aw_wdog_sc = NULL;

struct aw_wdog_softc {
	device_t	dev;
	struct resource *res;		/* memory-mapped registers */
	struct mtx	mtx;		/* serializes register access */
	uint8_t		wdog_ctrl;	/* CTRL register offset */
	uint32_t	wdog_ctrl_key;	/* CTRL write key (A31), else 0 */
	uint8_t		wdog_mode;	/* MODE register offset */
	uint8_t		wdog_mode_intvl_shift;	/* interval field shift */
	uint8_t		wdog_mode_en;	/* bits that enable the watchdog */
	uint8_t		wdog_config;	/* CONFIG offset, 0 when absent (A10) */
	uint8_t		wdog_config_value;	/* value to arm CONFIG with */
};
/* ocd_data tags distinguishing the two supported register layouts. */
#define	A10_WATCHDOG	1
#define	A31_WATCHDOG	2

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-wdt", A10_WATCHDOG},
	{"allwinner,sun6i-a31-wdt", A31_WATCHDOG},
	{NULL, 0}
};

/* watchdog(9) event handler and shutdown_final hook. */
static void aw_wdog_watchdog_fn(void *, u_int, int *);
static void aw_wdog_shutdown_fn(void *, int);
/*
 * Probe: match against the compatible table and set a human-readable
 * description.  The softc is not needed at probe time; the previous
 * revision's leftover diff lines fetching an unused softc are dropped.
 */
static int
aw_wdog_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
	case A10_WATCHDOG:
		device_set_desc(dev, "Allwinner A10 Watchdog");
		return (BUS_PROBE_DEFAULT);
	case A31_WATCHDOG:
		device_set_desc(dev, "Allwinner A31 Watchdog");
		return (BUS_PROBE_DEFAULT);
	}

	/* Matched the bus but not a variant this driver knows. */
	return (ENXIO);
}
/*
 * Attach: allocate the register window, select the per-variant register
 * layout, and register with watchdog(9) and the shutdown path.
 *
 * Returns 0 on success or ENXIO on any failure.
 */
static int
aw_wdog_attach(device_t dev)
{
	struct aw_wdog_softc *sc;
	int rid;

	/* Only a single watchdog instance is supported. */
	if (aw_wdog_sc != NULL)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;
	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
	case A10_WATCHDOG:
		sc->wdog_ctrl = A10_WDOG_CTRL;
		sc->wdog_mode = A10_WDOG_MODE;
		sc->wdog_mode_intvl_shift = A10_WDOG_MODE_INTVL_SHIFT;
		sc->wdog_mode_en = A10_WDOG_MODE_RST_EN | WDOG_MODE_EN;
		break;
	case A31_WATCHDOG:
		sc->wdog_ctrl = A31_WDOG_CTRL;
		sc->wdog_ctrl_key = A31_WDOG_CTRL_KEY;
		sc->wdog_mode = A31_WDOG_MODE;
		sc->wdog_mode_intvl_shift = A31_WDOG_MODE_INTVL_SHIFT;
		sc->wdog_mode_en = WDOG_MODE_EN;
		sc->wdog_config = A31_WDOG_CONFIG;
		sc->wdog_config_value = A31_WDOG_CONFIG_RST_EN_SYSTEM;
		break;
	default:
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res);
		return (ENXIO);
	}

	/*
	 * Publish the global pointer only after the register layout is
	 * known.  The old code set it before the switch, leaving a
	 * dangling, half-initialized softc visible (and blocking any
	 * later attach) if an unknown variant was rejected above.
	 */
	aw_wdog_sc = sc;

	mtx_init(&sc->mtx, "AW Watchdog", "aw_wdog", MTX_DEF);
	EVENTHANDLER_REGISTER(watchdog_list, aw_wdog_watchdog_fn, sc, 0);
	EVENTHANDLER_REGISTER(shutdown_final, aw_wdog_shutdown_fn, sc,
	    SHUTDOWN_PRI_LAST - 1);

	return (0);
}
/*
 * watchdog(9) event handler.  cmd encodes the requested timeout as
 * 2^(cmd & WD_INTERVAL) nanoseconds; cmd == 0 disables the watchdog.
 * *error is cleared only when the watchdog was successfully armed.
 */
static void
aw_wdog_watchdog_fn(void *private, u_int cmd, int *error)
{
	struct aw_wdog_softc *sc;
	uint64_t ms;
	int i;

	sc = private;
	mtx_lock(&sc->mtx);
	cmd &= WD_INTERVAL;

	if (cmd > 0) {
		/* Convert the power-of-two nanosecond code to ms. */
		ms = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000;
		/* Find the smallest hardware interval >= the request. */
		i = 0;
		while (wd_intervals[i].milliseconds &&
		    (ms > wd_intervals[i].milliseconds))
			i++;
		if (wd_intervals[i].milliseconds) {
			WRITE(sc, sc->wdog_mode,
			    (wd_intervals[i].value << sc->wdog_mode_intvl_shift) |
			    sc->wdog_mode_en);
			WRITE(sc, sc->wdog_ctrl,
			    WDOG_CTRL_RESTART | sc->wdog_ctrl_key);
			if (sc->wdog_config)
				WRITE(sc, sc->wdog_config,
				    sc->wdog_config_value);
			*error = 0;
		} else {
			/*
			 * Can't arm: the request exceeds the hardware
			 * maximum, and watchdog(9) requires the watchdog
			 * be disabled in that case.  Keep the register
			 * write under the lock -- the previous code
			 * dropped the mutex first and then touched the
			 * MODE register, racing concurrent callers.
			 */
			device_printf(sc->dev,
			    "Can't arm, timeout is more than 16 sec\n");
			WRITE(sc, sc->wdog_mode, 0);
		}
	} else {
		/* cmd == 0: disable the watchdog. */
		WRITE(sc, sc->wdog_mode, 0);
	}

	mtx_unlock(&sc->mtx);
}
/*
 * shutdown_final hook: trigger a watchdog reset for a plain reboot, but
 * leave the hardware alone when halting or powering off.
 */
static void
aw_wdog_shutdown_fn(void *private, int howto)
{

	if ((howto & (RB_POWEROFF | RB_HALT)) != 0)
		return;
	aw_wdog_watchdog_reset();
}
/*
 * Force an immediate board reset: arm the watchdog with its shortest
 * interval (500 ms, wd_intervals[0]) and spin until it fires.  Called
 * from the shutdown handler above and from machine reset paths via the
 * aw_wdog.h prototype.
 */
void
aw_wdog_watchdog_reset(void)
{

	/* Nothing to do if attach never ran (or failed). */
	if (aw_wdog_sc == NULL) {
		printf("Reset: watchdog device has not been initialized\n");
		return;
	}

	WRITE(aw_wdog_sc, aw_wdog_sc->wdog_mode,
	    (wd_intervals[0].value << aw_wdog_sc->wdog_mode_intvl_shift) |
	    aw_wdog_sc->wdog_mode_en);
	if (aw_wdog_sc->wdog_config)
		WRITE(aw_wdog_sc, aw_wdog_sc->wdog_config,
		    aw_wdog_sc->wdog_config_value);
	WRITE(aw_wdog_sc, aw_wdog_sc->wdog_ctrl,
	    WDOG_CTRL_RESTART | aw_wdog_sc->wdog_ctrl_key);

	/* Spin until the watchdog resets the SoC. */
	while(1)
		;
}
/* newbus glue: probe/attach only, no detach (singleton watchdog). */
static device_method_t aw_wdog_methods[] = {
	DEVMETHOD(device_probe, aw_wdog_probe),
	DEVMETHOD(device_attach, aw_wdog_attach),

	DEVMETHOD_END
};

static driver_t aw_wdog_driver = {
	"aw_wdog",
	aw_wdog_methods,
	sizeof(struct aw_wdog_softc),
};

static devclass_t aw_wdog_devclass;

DRIVER_MODULE(aw_wdog, simplebus, aw_wdog_driver, aw_wdog_devclass, 0, 0);
Index: head/sys/arm/allwinner/axp81x.c
===================================================================
--- head/sys/arm/allwinner/axp81x.c (revision 327172)
+++ head/sys/arm/allwinner/axp81x.c (revision 327173)
@@ -1,785 +1,783 @@
/*-
* Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* X-Powers AXP813/818 PMU for Allwinner SoCs
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/gpio.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <dev/iicbus/iicbus.h>
#include <dev/iicbus/iiconf.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/extres/regulator/regulator.h>
#include "gpio_if.h"
#include "iicbus_if.h"
#include "regdev_if.h"
MALLOC_DEFINE(M_AXP81X_REG, "AXP81x regulator", "AXP81x power regulator");

/* PMU register map (only the subset this driver touches). */
#define	AXP_ICTYPE		0x03	/* chip ID, read at attach */
#define	AXP_POWERCTL1		0x10
#define	AXP_POWERCTL1_DCDC2	(1 << 1)
#define	AXP_POWERCTL2		0x12
#define	AXP_POWERCTL2_DC1SW	(1 << 7)
#define	AXP_VOLTCTL_DCDC2	0x21
#define	AXP_VOLTCTL_STATUS	(1 << 7)
#define	AXP_VOLTCTL_MASK	0x7f	/* voltage code field */
#define	AXP_POWERBAT		0x32
#define	AXP_POWERBAT_SHUTDOWN	(1 << 7)	/* PMU power-off bit */
/* Interrupt enable banks 1-6; only the power-key bit is used. */
#define	AXP_IRQEN1		0x40
#define	AXP_IRQEN2		0x41
#define	AXP_IRQEN3		0x42
#define	AXP_IRQEN4		0x43
#define	AXP_IRQEN5		0x44
#define	AXP_IRQEN5_POKSIRQ	(1 << 4)	/* short power-key press */
#define	AXP_IRQEN6		0x45
#define	AXP_IRQSTAT5		0x4c
#define	AXP_IRQSTAT5_POKSIRQ	(1 << 4)
/* GPIO control: per-pin function field plus shared data/pull registers. */
#define	AXP_GPIO0_CTRL		0x90
#define	AXP_GPIO1_CTRL		0x92
#define	AXP_GPIO_FUNC		(0x7 << 0)
#define	AXP_GPIO_FUNC_SHIFT	0
#define	AXP_GPIO_FUNC_DRVLO	0	/* output driven low */
#define	AXP_GPIO_FUNC_DRVHI	1	/* output driven high */
#define	AXP_GPIO_FUNC_INPUT	2
#define	AXP_GPIO_SIGBIT		0x94	/* input level bits */
#define	AXP_GPIO_PD		0x97

/* The two GPIO pins exposed by the PMU and their control registers. */
static const struct {
	const char *name;
	uint8_t	ctrl_reg;
} axp81x_pins[] = {
	{ "GPIO0", AXP_GPIO0_CTRL },
	{ "GPIO1", AXP_GPIO1_CTRL },
};

static struct ofw_compat_data compat_data[] = {
	{ "x-powers,axp813", 1 },
	{ "x-powers,axp818", 1 },
	{ NULL, 0 }
};

/* Single IRQ line from the PMU. */
static struct resource_spec axp81x_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE },
	{ -1, 0 }
};
/*
 * Static description of one regulator: enable bit location and the
 * two-range voltage step table (step1 for the first voltage_nstep1
 * codes, step2 for the next voltage_nstep2; values in mV).
 */
struct axp81x_regdef {
	intptr_t	id;
	char		*name;
	char		*supply_name;
	uint8_t		enable_reg;
	uint8_t		enable_mask;
	uint8_t		voltage_reg;
	int		voltage_min;
	int		voltage_max;
	int		voltage_step1;
	int		voltage_nstep1;
	int		voltage_step2;
	int		voltage_nstep2;
};

enum axp81x_reg_id {
	AXP81X_REG_ID_DC1SW,
	AXP81X_REG_ID_DCDC2,
};

/* Regulators this driver knows how to drive. */
static struct axp81x_regdef axp81x_regdefs[] = {
	{
		.id = AXP81X_REG_ID_DC1SW,
		.name = "dc1sw",
		.enable_reg = AXP_POWERCTL2,
		.enable_mask = AXP_POWERCTL2_DC1SW,
		/* No voltage_reg: DC1SW is switch-only, not adjustable. */
	},
	{
		.id = AXP81X_REG_ID_DCDC2,
		.name = "dcdc2",
		.enable_reg = AXP_POWERCTL1,
		.enable_mask = AXP_POWERCTL1_DCDC2,
		.voltage_reg = AXP_VOLTCTL_DCDC2,
		.voltage_min = 500,
		.voltage_max = 1300,
		.voltage_step1 = 10,
		.voltage_nstep1 = 70,
		.voltage_step2 = 20,
		.voltage_nstep2 = 5,
	},
};

struct axp81x_softc;

/* Per-regulator softc attached to each regnode. */
struct axp81x_reg_sc {
	struct regnode	*regnode;
	device_t	base_dev;	/* the PMU device (I2C access) */
	struct axp81x_regdef *def;
	phandle_t	xref;		/* OFW xref for regdev_map lookups */
	struct regnode_std_param *param;
};

struct axp81x_softc {
	struct resource	*res;		/* IRQ resource */
	uint16_t	addr;		/* I2C slave address */
	void		*ih;		/* interrupt handler cookie */
	device_t	gpiodev;	/* attached gpiobus child */
	struct mtx	mtx;		/* serializes GPIO register access */
	int		busy;

	/* Regulators */
	struct axp81x_reg_sc	**regs;
	int		nregs;
};

#define	AXP_LOCK(sc)	mtx_lock(&(sc)->mtx)
#define	AXP_UNLOCK(sc)	mtx_unlock(&(sc)->mtx)
/*
 * Read "size" bytes starting at PMU register "reg": an I2C write of the
 * register address followed by a read of the data.
 */
static int
axp81x_read(device_t dev, uint8_t reg, uint8_t *data, uint8_t size)
{
	struct axp81x_softc *sc = device_get_softc(dev);
	struct iic_msg msgs[2] = {
		{ .slave = sc->addr, .flags = IIC_M_WR, .len = 1,
		    .buf = &reg },
		{ .slave = sc->addr, .flags = IIC_M_RD, .len = size,
		    .buf = data },
	};

	return (iicbus_transfer(dev, msgs, 2));
}
/*
 * Write a single byte "val" to PMU register "reg": register address
 * followed by the data byte in one transfer.
 */
static int
axp81x_write(device_t dev, uint8_t reg, uint8_t val)
{
	struct axp81x_softc *sc = device_get_softc(dev);
	struct iic_msg msgs[2] = {
		{ .slave = sc->addr, .flags = IIC_M_WR, .len = 1,
		    .buf = &reg },
		{ .slave = sc->addr, .flags = IIC_M_WR, .len = 1,
		    .buf = &val },
	};

	return (iicbus_transfer(dev, msgs, 2));
}
/* Regulator-framework init hook: nothing to initialize for this PMU. */
static int
axp81x_regnode_init(struct regnode *regnode)
{
	return (0);
}
static int
axp81x_regnode_enable(struct regnode *regnode, bool enable, int *udelay)
{
struct axp81x_reg_sc *sc;
uint8_t val;
sc = regnode_get_softc(regnode);
axp81x_read(sc->base_dev, sc->def->enable_reg, &val, 1);
if (enable)
val |= sc->def->enable_mask;
else
val &= ~sc->def->enable_mask;
axp81x_write(sc->base_dev, sc->def->enable_reg, val);
*udelay = 0;
return (0);
}
static void
axp81x_regnode_reg_to_voltage(struct axp81x_reg_sc *sc, uint8_t val, int *uv)
{
if (val < sc->def->voltage_nstep1)
*uv = sc->def->voltage_min + val * sc->def->voltage_step1;
else
*uv = sc->def->voltage_min +
(sc->def->voltage_nstep1 * sc->def->voltage_step1) +
((val - sc->def->voltage_nstep1) * sc->def->voltage_step2);
*uv *= 1000;
}
/*
 * Convert a [min_uvolt, max_uvolt] request to a raw register code by
 * walking the two step ranges until the minimum is reached.  Returns 0
 * and stores the code in *val, or EINVAL if the window cannot be
 * satisfied.
 */
static int
axp81x_regnode_voltage_to_reg(struct axp81x_reg_sc *sc, int min_uvolt,
    int max_uvolt, uint8_t *val)
{
	uint8_t nval;
	int nstep, uvolt;

	nval = 0;
	uvolt = sc->def->voltage_min * 1000;

	/* First range: up to voltage_nstep1 steps of voltage_step1 mV. */
	for (nstep = 0; nstep < sc->def->voltage_nstep1 && uvolt < min_uvolt;
	    nstep++) {
		++nval;
		uvolt += (sc->def->voltage_step1 * 1000);
	}
	/* Second range: up to voltage_nstep2 steps of voltage_step2 mV. */
	for (nstep = 0; nstep < sc->def->voltage_nstep2 && uvolt < min_uvolt;
	    nstep++) {
		++nval;
		uvolt += (sc->def->voltage_step2 * 1000);
	}
	/*
	 * Out of range if we overshot the caller's maximum, or if both
	 * step tables were exhausted without reaching the minimum (the
	 * old code silently returned the highest reachable setting in
	 * the latter case, under-volting the rail).
	 */
	if (uvolt < min_uvolt || uvolt > max_uvolt)
		return (EINVAL);

	*val = nval;
	return (0);
}
/*
 * Set the regulator output to a voltage within [min_uvolt, max_uvolt].
 * Switch-only regulators (no step table) return ENXIO; an unsatisfiable
 * window returns ERANGE.  No settle delay is reported.
 */
static int
axp81x_regnode_set_voltage(struct regnode *regnode, int min_uvolt,
    int max_uvolt, int *udelay)
{
	struct axp81x_reg_sc *sc;
	uint8_t val;

	sc = regnode_get_softc(regnode);

	if (!sc->def->voltage_step1 || !sc->def->voltage_step2)
		return (ENXIO);

	if (axp81x_regnode_voltage_to_reg(sc, min_uvolt, max_uvolt, &val) != 0)
		return (ERANGE);

	/* NOTE(review): the write's return value is not checked here. */
	axp81x_write(sc->base_dev, sc->def->voltage_reg, val);

	*udelay = 0;

	return (0);
}

/*
 * Read back the current output voltage in microvolts.  Switch-only
 * regulators return ENXIO.
 */
static int
axp81x_regnode_get_voltage(struct regnode *regnode, int *uvolt)
{
	struct axp81x_reg_sc *sc;
	uint8_t val;

	sc = regnode_get_softc(regnode);

	if (!sc->def->voltage_step1 || !sc->def->voltage_step2)
		return (ENXIO);

	/* NOTE(review): a failed read leaves val uninitialized here. */
	axp81x_read(sc->base_dev, sc->def->voltage_reg, &val, 1);
	axp81x_regnode_reg_to_voltage(sc, val & AXP_VOLTCTL_MASK, uvolt);

	return (0);
}
/* regnode(9) method table; status/limit handling comes from the base class. */
static regnode_method_t axp81x_regnode_methods[] = {
	/* Regulator interface */
	REGNODEMETHOD(regnode_init, axp81x_regnode_init),
	REGNODEMETHOD(regnode_enable, axp81x_regnode_enable),
	REGNODEMETHOD(regnode_set_voltage, axp81x_regnode_set_voltage),
	REGNODEMETHOD(regnode_get_voltage, axp81x_regnode_get_voltage),

	REGNODEMETHOD_END
};
DEFINE_CLASS_1(axp81x_regnode, axp81x_regnode_class, axp81x_regnode_methods,
    sizeof(struct axp81x_reg_sc), regnode_class);
/*
 * shutdown_final handler: on power-off, ask the PMU to cut power.
 * Reboots and halts are left untouched.
 */
static void
axp81x_shutdown(void *devp, int howto)
{
	device_t dev = devp;

	if ((howto & RB_POWEROFF) == 0)
		return;

	if (bootverbose)
		device_printf(dev, "Shutdown AXP81x\n");

	axp81x_write(dev, AXP_POWERBAT, AXP_POWERBAT_SHUTDOWN);
}
/*
 * Interrupt handler: read IRQ status bank 5, initiate a clean power-off
 * on a short power-key press, and acknowledge all latched bits by
 * writing them back.  The previous revision's leftover diff lines
 * declaring an unused softc are dropped.
 */
static void
axp81x_intr(void *arg)
{
	device_t dev;
	uint8_t val;
	int error;

	dev = arg;

	error = axp81x_read(dev, AXP_IRQSTAT5, &val, 1);
	if (error != 0)
		return;

	if (val != 0) {
		if ((val & AXP_IRQSTAT5_POKSIRQ) != 0) {
			if (bootverbose)
				device_printf(dev, "Power button pressed\n");
			shutdown_nice(RB_POWEROFF);
		}
		/* Acknowledge */
		axp81x_write(dev, AXP_IRQSTAT5, val);
	}
}
/* Return the gpiobus child created at attach time. */
static device_t
axp81x_gpio_get_bus(device_t dev)
{
	struct axp81x_softc *sc = device_get_softc(dev);

	return (sc->gpiodev);
}
/* Highest valid pin number (pins are numbered from 0). */
static int
axp81x_gpio_pin_max(device_t dev, int *maxpin)
{
	const int npins = nitems(axp81x_pins);

	*maxpin = npins - 1;
	return (0);
}
/* Copy the pin's name into the caller's GPIOMAXNAME-sized buffer. */
static int
axp81x_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
{
	const char *pname;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	pname = axp81x_pins[pin].name;
	snprintf(name, GPIOMAXNAME, "%s", pname);

	return (0);
}
/* All PMU pins support plain digital input and output only. */
static int
axp81x_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
{
	if (pin < nitems(axp81x_pins)) {
		*caps = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
		return (0);
	}

	return (EINVAL);
}
/*
 * Report the pin's current direction by decoding its function field:
 * INPUT -> GPIO_PIN_INPUT, DRVLO/DRVHI -> GPIO_PIN_OUTPUT, anything
 * else (e.g. an alternate function) -> 0.
 */
static int
axp81x_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
{
	struct axp81x_softc *sc;
	uint8_t data, func;
	int error;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	sc = device_get_softc(dev);

	AXP_LOCK(sc);
	error = axp81x_read(dev, axp81x_pins[pin].ctrl_reg, &data, 1);
	if (error == 0) {
		func = (data & AXP_GPIO_FUNC) >> AXP_GPIO_FUNC_SHIFT;
		if (func == AXP_GPIO_FUNC_INPUT)
			*flags = GPIO_PIN_INPUT;
		else if (func == AXP_GPIO_FUNC_DRVLO ||
		    func == AXP_GPIO_FUNC_DRVHI)
			*flags = GPIO_PIN_OUTPUT;
		else
			*flags = 0;
	}
	AXP_UNLOCK(sc);

	return (error);
}

/*
 * Set the pin direction.  Output selects the drive-low function
 * (function field cleared to DRVLO == 0); input sets the INPUT
 * function.  Other flag bits are ignored.
 */
static int
axp81x_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
	struct axp81x_softc *sc;
	uint8_t data;
	int error;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	sc = device_get_softc(dev);

	AXP_LOCK(sc);
	error = axp81x_read(dev, axp81x_pins[pin].ctrl_reg, &data, 1);
	if (error == 0) {
		/* Clearing the field defaults to DRVLO (output low). */
		data &= ~AXP_GPIO_FUNC;
		if ((flags & (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)) != 0) {
			if ((flags & GPIO_PIN_OUTPUT) == 0)
				data |= AXP_GPIO_FUNC_INPUT;
		}
		error = axp81x_write(dev, axp81x_pins[pin].ctrl_reg, data);
	}
	AXP_UNLOCK(sc);

	return (error);
}
/*
 * Read the pin's logical level.  For outputs the level is inferred from
 * the drive function (DRVLO/DRVHI); for inputs it is read from the
 * shared signal-bit register.  EIO if the pin is in a non-GPIO function.
 */
static int
axp81x_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
{
	struct axp81x_softc *sc;
	uint8_t data, func;
	int error;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	sc = device_get_softc(dev);

	AXP_LOCK(sc);
	error = axp81x_read(dev, axp81x_pins[pin].ctrl_reg, &data, 1);
	if (error == 0) {
		func = (data & AXP_GPIO_FUNC) >> AXP_GPIO_FUNC_SHIFT;
		switch (func) {
		case AXP_GPIO_FUNC_DRVLO:
			*val = 0;
			break;
		case AXP_GPIO_FUNC_DRVHI:
			*val = 1;
			break;
		case AXP_GPIO_FUNC_INPUT:
			/* Input level lives in the shared SIGBIT register. */
			error = axp81x_read(dev, AXP_GPIO_SIGBIT, &data, 1);
			if (error == 0)
				*val = (data & (1 << pin)) ? 1 : 0;
			break;
		default:
			error = EIO;
			break;
		}
	}
	AXP_UNLOCK(sc);

	return (error);
}

/*
 * Drive the pin to "val" by writing the matching drive function
 * (0 -> DRVLO, 1 -> DRVHI).  EIO if the pin is not configured as an
 * output.
 */
static int
axp81x_gpio_pin_set(device_t dev, uint32_t pin, unsigned int val)
{
	struct axp81x_softc *sc;
	uint8_t data, func;
	int error;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	sc = device_get_softc(dev);

	AXP_LOCK(sc);
	error = axp81x_read(dev, axp81x_pins[pin].ctrl_reg, &data, 1);
	if (error == 0) {
		func = (data & AXP_GPIO_FUNC) >> AXP_GPIO_FUNC_SHIFT;
		switch (func) {
		case AXP_GPIO_FUNC_DRVLO:
		case AXP_GPIO_FUNC_DRVHI:
			/* val 0/1 maps directly onto DRVLO/DRVHI. */
			data &= ~AXP_GPIO_FUNC;
			data |= (val << AXP_GPIO_FUNC_SHIFT);
			break;
		default:
			error = EIO;
			break;
		}
	}
	if (error == 0)
		error = axp81x_write(dev, axp81x_pins[pin].ctrl_reg, data);
	AXP_UNLOCK(sc);

	return (error);
}
/*
 * Toggle an output pin by swapping its drive function between DRVLO and
 * DRVHI.  EIO if the pin is not configured as an output.
 */
static int
axp81x_gpio_pin_toggle(device_t dev, uint32_t pin)
{
	struct axp81x_softc *sc;
	uint8_t data, func;
	int error;

	if (pin >= nitems(axp81x_pins))
		return (EINVAL);

	sc = device_get_softc(dev);

	AXP_LOCK(sc);
	error = axp81x_read(dev, axp81x_pins[pin].ctrl_reg, &data, 1);
	if (error == 0) {
		func = (data & AXP_GPIO_FUNC) >> AXP_GPIO_FUNC_SHIFT;
		switch (func) {
		case AXP_GPIO_FUNC_DRVLO:
			data &= ~AXP_GPIO_FUNC;
			data |= (AXP_GPIO_FUNC_DRVHI << AXP_GPIO_FUNC_SHIFT);
			break;
		case AXP_GPIO_FUNC_DRVHI:
			data &= ~AXP_GPIO_FUNC;
			data |= (AXP_GPIO_FUNC_DRVLO << AXP_GPIO_FUNC_SHIFT);
			break;
		default:
			error = EIO;
			break;
		}
	}
	if (error == 0)
		error = axp81x_write(dev, axp81x_pins[pin].ctrl_reg, data);
	AXP_UNLOCK(sc);

	return (error);
}
/* Map a two-cell OFW gpios specifier (<pin flags>) to pin/flags. */
static int
axp81x_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent,
    int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags)
{
	if (gpios[0] < nitems(axp81x_pins)) {
		*pin = gpios[0];
		*flags = gpios[1];
		return (0);
	}

	return (EINVAL);
}
/* Expose our own OFW node to children (the gpiobus). */
static phandle_t
axp81x_get_node(device_t dev, device_t bus)
{
	phandle_t node;

	node = ofw_bus_get_node(dev);
	return (node);
}
/*
 * Create and register one regnode for "def" using standard parameters
 * parsed from the DT node; unset voltage limits fall back to the
 * regulator definition.  Returns the per-regulator softc or NULL if
 * regnode_create() fails.
 */
static struct axp81x_reg_sc *
axp81x_reg_attach(device_t dev, phandle_t node,
    struct axp81x_regdef *def)
{
	struct axp81x_reg_sc *reg_sc;
	struct regnode_init_def initdef;
	struct regnode *regnode;

	memset(&initdef, 0, sizeof(initdef));
	regulator_parse_ofw_stdparam(dev, node, &initdef);
	/* DT may omit limits; default to the regulator definition (mV->uV). */
	if (initdef.std_param.min_uvolt == 0)
		initdef.std_param.min_uvolt = def->voltage_min * 1000;
	if (initdef.std_param.max_uvolt == 0)
		initdef.std_param.max_uvolt = def->voltage_max * 1000;
	initdef.id = def->id;
	initdef.ofw_node = node;
	regnode = regnode_create(dev, &axp81x_regnode_class, &initdef);
	if (regnode == NULL) {
		device_printf(dev, "cannot create regulator\n");
		return (NULL);
	}

	/* Fill in the regnode's softc before registering it. */
	reg_sc = regnode_get_softc(regnode);
	reg_sc->regnode = regnode;
	reg_sc->base_dev = dev;
	reg_sc->def = def;
	reg_sc->xref = OF_xref_from_node(node);
	reg_sc->param = regnode_get_stdparam(regnode);

	regnode_register(regnode);

	return (reg_sc);
}
/*
 * regdev interface: resolve a DT xref to the matching regulator id.
 * Returns ENXIO when no attached regulator carries that xref.
 */
static int
axp81x_regdev_map(device_t dev, phandle_t xref, int ncells, pcell_t *cells,
    intptr_t *num)
{
	struct axp81x_softc *sc = device_get_softc(dev);
	struct axp81x_reg_sc *reg;
	int idx;

	for (idx = 0; idx < sc->nregs; idx++) {
		reg = sc->regs[idx];
		if (reg != NULL && reg->xref == xref) {
			*num = reg->def->id;
			return (0);
		}
	}

	return (ENXIO);
}
/* Probe: accept any enabled node matching the compatible table. */
static int
axp81x_probe(device_t dev)
{
	if (ofw_bus_status_okay(dev) &&
	    ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) {
		device_set_desc(dev, "X-Powers AXP81x Power Management Unit");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
/*
 * Attach: allocate the IRQ, create regulators found under the DT
 * "regulators" node, enable the power-key interrupt, and hang the
 * gpiobus child.  Returns 0 on success or an errno on failure.
 */
static int
axp81x_attach(device_t dev)
{
	struct axp81x_softc *sc;
	struct axp81x_reg_sc *reg;
	uint8_t chip_id;
	phandle_t rnode, child;
	int error, i;

	sc = device_get_softc(dev);

	sc->addr = iicbus_get_addr(dev);
	mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	error = bus_alloc_resources(dev, axp81x_spec, &sc->res);
	if (error != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		/* The old code leaked the mutex on this path. */
		mtx_destroy(&sc->mtx);
		return (error);
	}

	if (bootverbose) {
		axp81x_read(dev, AXP_ICTYPE, &chip_id, 1);
		device_printf(dev, "chip ID 0x%02x\n", chip_id);
	}

	sc->nregs = nitems(axp81x_regdefs);
	sc->regs = malloc(sizeof(struct axp81x_reg_sc *) * sc->nregs,
	    M_AXP81X_REG, M_WAITOK | M_ZERO);

	/* Attach known regulators that exist in the DT */
	rnode = ofw_bus_find_child(ofw_bus_get_node(dev), "regulators");
	if (rnode > 0) {
		for (i = 0; i < sc->nregs; i++) {
			child = ofw_bus_find_child(rnode,
			    axp81x_regdefs[i].name);
			if (child == 0)
				continue;
			reg = axp81x_reg_attach(dev, child, &axp81x_regdefs[i]);
			if (reg == NULL) {
				device_printf(dev,
				    "cannot attach regulator %s\n",
				    axp81x_regdefs[i].name);
				/*
				 * NOTE(review): regulators already
				 * registered above cannot be unwound
				 * here, so earlier allocations are
				 * deliberately left in place.
				 */
				return (ENXIO);
			}
			sc->regs[i] = reg;
		}
	}

	/* Enable IRQ on short power key press */
	axp81x_write(dev, AXP_IRQEN1, 0);
	axp81x_write(dev, AXP_IRQEN2, 0);
	axp81x_write(dev, AXP_IRQEN3, 0);
	axp81x_write(dev, AXP_IRQEN4, 0);
	axp81x_write(dev, AXP_IRQEN5, AXP_IRQEN5_POKSIRQ);
	axp81x_write(dev, AXP_IRQEN6, 0);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axp81x_intr, dev, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	EVENTHANDLER_REGISTER(shutdown_final, axp81x_shutdown, dev,
	    SHUTDOWN_PRI_LAST);

	sc->gpiodev = gpiobus_attach_bus(dev);

	return (0);
}
static device_method_t axp81x_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, axp81x_probe),
	DEVMETHOD(device_attach, axp81x_attach),

	/* GPIO interface */
	DEVMETHOD(gpio_get_bus, axp81x_gpio_get_bus),
	DEVMETHOD(gpio_pin_max, axp81x_gpio_pin_max),
	DEVMETHOD(gpio_pin_getname, axp81x_gpio_pin_getname),
	DEVMETHOD(gpio_pin_getcaps, axp81x_gpio_pin_getcaps),
	DEVMETHOD(gpio_pin_getflags, axp81x_gpio_pin_getflags),
	DEVMETHOD(gpio_pin_setflags, axp81x_gpio_pin_setflags),
	DEVMETHOD(gpio_pin_get, axp81x_gpio_pin_get),
	DEVMETHOD(gpio_pin_set, axp81x_gpio_pin_set),
	DEVMETHOD(gpio_pin_toggle, axp81x_gpio_pin_toggle),
	DEVMETHOD(gpio_map_gpios, axp81x_gpio_map_gpios),

	/* Regdev interface */
	DEVMETHOD(regdev_map, axp81x_regdev_map),

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_node, axp81x_get_node),

	DEVMETHOD_END
};

static driver_t axp81x_driver = {
	"axp81x_pmu",
	axp81x_methods,
	sizeof(struct axp81x_softc),
};

static devclass_t axp81x_devclass;
extern devclass_t ofwgpiobus_devclass, gpioc_devclass;
extern driver_t ofw_gpiobus_driver, gpioc_driver;

/*
 * Attach early (interrupt pass) so regulators are available before
 * consumers; the gpiobus/gpioc children hang off the PMU device.
 */
EARLY_DRIVER_MODULE(axp81x, iicbus, axp81x_driver, axp81x_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
EARLY_DRIVER_MODULE(ofw_gpiobus, axp81x_pmu, ofw_gpiobus_driver,
    ofwgpiobus_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
DRIVER_MODULE(gpioc, axp81x_pmu, gpioc_driver, gpioc_devclass, 0, 0);
MODULE_VERSION(axp81x, 1);
MODULE_DEPEND(axp81x, iicbus, 1, 1, 1);
Index: head/sys/arm/allwinner/clk/aw_pll.c
===================================================================
--- head/sys/arm/allwinner/clk/aw_pll.c (revision 327172)
+++ head/sys/arm/allwinner/clk/aw_pll.c (revision 327173)
@@ -1,1349 +1,1347 @@
/*-
* Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Allwinner PLL clock
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_subr.h>
#include <dev/extres/clk/clk.h>
#include <arm/allwinner/aw_machdep.h>
#include "clkdev_if.h"
#define SUN4I_A10_PLL2_1X 0
#define SUN4I_A10_PLL2_2X 1
#define SUN4I_A10_PLL2_4X 2
#define SUN4I_A10_PLL2_8X 3
#define AW_PLL_ENABLE (1 << 31)
#define A10_PLL1_OUT_EXT_DIVP (0x3 << 16)
#define A10_PLL1_OUT_EXT_DIVP_SHIFT 16
#define A10_PLL1_FACTOR_N (0x1f << 8)
#define A10_PLL1_FACTOR_N_SHIFT 8
#define A10_PLL1_FACTOR_K (0x3 << 4)
#define A10_PLL1_FACTOR_K_SHIFT 4
#define A10_PLL1_FACTOR_M (0x3 << 0)
#define A10_PLL1_FACTOR_M_SHIFT 0
#define A10_PLL2_POST_DIV (0xf << 26)
#define A10_PLL2_POST_DIV_SHIFT 26
#define A10_PLL2_FACTOR_N (0x7f << 8)
#define A10_PLL2_FACTOR_N_SHIFT 8
#define A10_PLL2_PRE_DIV (0x1f << 0)
#define A10_PLL2_PRE_DIV_SHIFT 0
#define A10_PLL3_MODE_SEL (0x1 << 15)
#define A10_PLL3_MODE_SEL_FRACT (0 << 15)
#define A10_PLL3_MODE_SEL_INT (1 << 15)
#define A10_PLL3_FUNC_SET (0x1 << 14)
#define A10_PLL3_FUNC_SET_270MHZ (0 << 14)
#define A10_PLL3_FUNC_SET_297MHZ (1 << 14)
#define A10_PLL3_FACTOR_M (0x7f << 0)
#define A10_PLL3_FACTOR_M_SHIFT 0
#define A10_PLL3_REF_FREQ 3000000
#define A10_PLL5_OUT_EXT_DIVP (0x3 << 16)
#define A10_PLL5_OUT_EXT_DIVP_SHIFT 16
#define A10_PLL5_FACTOR_N (0x1f << 8)
#define A10_PLL5_FACTOR_N_SHIFT 8
#define A10_PLL5_FACTOR_K (0x3 << 4)
#define A10_PLL5_FACTOR_K_SHIFT 4
#define A10_PLL5_FACTOR_M1 (0x3 << 2)
#define A10_PLL5_FACTOR_M1_SHIFT 2
#define A10_PLL5_FACTOR_M (0x3 << 0)
#define A10_PLL5_FACTOR_M_SHIFT 0
#define A10_PLL6_BYPASS_EN (1 << 30)
#define A10_PLL6_SATA_CLK_EN (1 << 14)
#define A10_PLL6_FACTOR_N (0x1f << 8)
#define A10_PLL6_FACTOR_N_SHIFT 8
#define A10_PLL6_FACTOR_K (0x3 << 4)
#define A10_PLL6_FACTOR_K_SHIFT 4
#define A10_PLL6_FACTOR_M (0x3 << 0)
#define A10_PLL6_FACTOR_M_SHIFT 0
#define A10_PLL2_POST_DIV (0xf << 26)
#define A13_PLL2_POST_DIV (0xf << 26)
#define A13_PLL2_POST_DIV_SHIFT 26
#define A13_PLL2_FACTOR_N (0x7f << 8)
#define A13_PLL2_FACTOR_N_SHIFT 8
#define A13_PLL2_PRE_DIV (0x1f << 0)
#define A13_PLL2_PRE_DIV_SHIFT 0
#define A23_PLL1_FACTOR_P (0x3 << 16)
#define A23_PLL1_FACTOR_P_SHIFT 16
#define A23_PLL1_FACTOR_N (0x1f << 8)
#define A23_PLL1_FACTOR_N_SHIFT 8
#define A23_PLL1_FACTOR_K (0x3 << 4)
#define A23_PLL1_FACTOR_K_SHIFT 4
#define A23_PLL1_FACTOR_M (0x3 << 0)
#define A23_PLL1_FACTOR_M_SHIFT 0
#define A31_PLL1_LOCK (1 << 28)
#define A31_PLL1_CPU_SIGMA_DELTA_EN (1 << 24)
#define A31_PLL1_FACTOR_N (0x1f << 8)
#define A31_PLL1_FACTOR_N_SHIFT 8
#define A31_PLL1_FACTOR_K (0x3 << 4)
#define A31_PLL1_FACTOR_K_SHIFT 4
#define A31_PLL1_FACTOR_M (0x3 << 0)
#define A31_PLL1_FACTOR_M_SHIFT 0
#define A31_PLL6_LOCK (1 << 28)
#define A31_PLL6_BYPASS_EN (1 << 25)
#define A31_PLL6_CLK_OUT_EN (1 << 24)
#define A31_PLL6_24M_OUT_EN (1 << 18)
#define A31_PLL6_24M_POST_DIV (0x3 << 16)
#define A31_PLL6_24M_POST_DIV_SHIFT 16
#define A31_PLL6_FACTOR_N (0x1f << 8)
#define A31_PLL6_FACTOR_N_SHIFT 8
#define A31_PLL6_FACTOR_K (0x3 << 4)
#define A31_PLL6_FACTOR_K_SHIFT 4
#define A31_PLL6_DEFAULT_N 0x18
#define A31_PLL6_DEFAULT_K 0x1
#define A31_PLL6_TIMEOUT 10
#define A64_PLLHSIC_LOCK (1 << 28)
#define A64_PLLHSIC_FRAC_CLK_OUT (1 << 25)
#define A64_PLLHSIC_PLL_MODE_SEL (1 << 24)
#define A64_PLLHSIC_PLL_SDM_EN (1 << 20)
#define A64_PLLHSIC_FACTOR_N (0x7f << 8)
#define A64_PLLHSIC_FACTOR_N_SHIFT 8
#define A64_PLLHSIC_PRE_DIV_M (0xf << 0)
#define A64_PLLHSIC_PRE_DIV_M_SHIFT 0
#define A80_PLL4_CLK_OUT_EN (1 << 20)
#define A80_PLL4_PLL_DIV2 (1 << 18)
#define A80_PLL4_PLL_DIV1 (1 << 16)
#define A80_PLL4_FACTOR_N (0xff << 8)
#define A80_PLL4_FACTOR_N_SHIFT 8
#define A83T_PLLCPUX_LOCK_TIME (0x7 << 24)
#define A83T_PLLCPUX_LOCK_TIME_SHIFT 24
#define A83T_PLLCPUX_CLOCK_OUTPUT_DIS (1 << 20)
#define A83T_PLLCPUX_OUT_EXT_DIVP (1 << 16)
#define A83T_PLLCPUX_FACTOR_N (0xff << 8)
#define A83T_PLLCPUX_FACTOR_N_SHIFT 8
#define A83T_PLLCPUX_FACTOR_N_MIN 12
#define A83T_PLLCPUX_FACTOR_N_MAX 125
#define A83T_PLLCPUX_POSTDIV_M (0x3 << 0)
#define A83T_PLLCPUX_POSTDIV_M_SHIFT 0
#define H3_PLL2_LOCK (1 << 28)
#define H3_PLL2_SDM_EN (1 << 24)
#define H3_PLL2_POST_DIV (0xf << 16)
#define H3_PLL2_POST_DIV_SHIFT 16
#define H3_PLL2_FACTOR_N (0x7f << 8)
#define H3_PLL2_FACTOR_N_SHIFT 8
#define H3_PLL2_PRE_DIV (0x1f << 0)
#define H3_PLL2_PRE_DIV_SHIFT 0
#define CLKID_A10_PLL5_DDR 0
#define CLKID_A10_PLL5_OTHER 1
#define CLKID_A10_PLL6_SATA 0
#define CLKID_A10_PLL6_OTHER 1
#define CLKID_A10_PLL6 2
#define CLKID_A10_PLL6_DIV_4 3
#define CLKID_A31_PLL6 0
#define CLKID_A31_PLL6_X2 1
/*
 * One row of a PLL factor table: the (n, k, m, p) factor combination
 * that produces a given output frequency.
 */
struct aw_pll_factor {
	unsigned int	n;
	unsigned int	k;
	unsigned int	m;
	unsigned int	p;
	uint64_t	freq;	/* resulting output frequency, Hz */
};
#define	PLLFACTOR(_n, _k, _m, _p, _freq)	\
	{ .n = (_n), .k = (_k), .m = (_m), .p = (_p), .freq = (_freq) }

/* Supported output frequencies for the A10 PLL1 (CPU clock). */
static struct aw_pll_factor aw_a10_pll1_factors[] = {
	PLLFACTOR(6, 0, 0, 0, 144000000),
	PLLFACTOR(12, 0, 0, 0, 312000000),
	PLLFACTOR(21, 0, 0, 0, 528000000),
	PLLFACTOR(29, 0, 0, 0, 720000000),
	PLLFACTOR(18, 1, 0, 0, 864000000),
	PLLFACTOR(19, 1, 0, 0, 912000000),
	PLLFACTOR(20, 1, 0, 0, 960000000),
};
/*
 * Supported CPU PLL rates for the A23/H3 PLL1, 60 MHz .. 1.872 GHz.
 * Entries are ordered by ascending frequency; set_freq does an exact match.
 */
static struct aw_pll_factor aw_a23_pll1_factors[] = {
	PLLFACTOR(9, 0, 0, 2, 60000000),
	PLLFACTOR(10, 0, 0, 2, 66000000),
	PLLFACTOR(11, 0, 0, 2, 72000000),
	PLLFACTOR(12, 0, 0, 2, 78000000),
	PLLFACTOR(13, 0, 0, 2, 84000000),
	PLLFACTOR(14, 0, 0, 2, 90000000),
	PLLFACTOR(15, 0, 0, 2, 96000000),
	PLLFACTOR(16, 0, 0, 2, 102000000),
	PLLFACTOR(17, 0, 0, 2, 108000000),
	PLLFACTOR(18, 0, 0, 2, 114000000),
	PLLFACTOR(9, 0, 0, 1, 120000000),
	PLLFACTOR(10, 0, 0, 1, 132000000),
	PLLFACTOR(11, 0, 0, 1, 144000000),
	PLLFACTOR(12, 0, 0, 1, 156000000),
	PLLFACTOR(13, 0, 0, 1, 168000000),
	PLLFACTOR(14, 0, 0, 1, 180000000),
	PLLFACTOR(15, 0, 0, 1, 192000000),
	PLLFACTOR(16, 0, 0, 1, 204000000),
	PLLFACTOR(17, 0, 0, 1, 216000000),
	PLLFACTOR(18, 0, 0, 1, 228000000),
	PLLFACTOR(9, 0, 0, 0, 240000000),
	PLLFACTOR(10, 0, 0, 0, 264000000),
	PLLFACTOR(11, 0, 0, 0, 288000000),
	PLLFACTOR(12, 0, 0, 0, 312000000),
	PLLFACTOR(13, 0, 0, 0, 336000000),
	PLLFACTOR(14, 0, 0, 0, 360000000),
	PLLFACTOR(15, 0, 0, 0, 384000000),
	PLLFACTOR(16, 0, 0, 0, 408000000),
	PLLFACTOR(17, 0, 0, 0, 432000000),
	PLLFACTOR(18, 0, 0, 0, 456000000),
	PLLFACTOR(19, 0, 0, 0, 480000000),
	PLLFACTOR(20, 0, 0, 0, 504000000),
	PLLFACTOR(21, 0, 0, 0, 528000000),
	PLLFACTOR(22, 0, 0, 0, 552000000),
	PLLFACTOR(23, 0, 0, 0, 576000000),
	PLLFACTOR(24, 0, 0, 0, 600000000),
	PLLFACTOR(25, 0, 0, 0, 624000000),
	PLLFACTOR(26, 0, 0, 0, 648000000),
	PLLFACTOR(27, 0, 0, 0, 672000000),
	PLLFACTOR(28, 0, 0, 0, 696000000),
	PLLFACTOR(29, 0, 0, 0, 720000000),
	PLLFACTOR(15, 1, 0, 0, 768000000),
	PLLFACTOR(10, 2, 0, 0, 792000000),
	PLLFACTOR(16, 1, 0, 0, 816000000),
	PLLFACTOR(17, 1, 0, 0, 864000000),
	PLLFACTOR(18, 1, 0, 0, 912000000),
	PLLFACTOR(12, 2, 0, 0, 936000000),
	PLLFACTOR(19, 1, 0, 0, 960000000),
	PLLFACTOR(20, 1, 0, 0, 1008000000),
	PLLFACTOR(21, 1, 0, 0, 1056000000),
	PLLFACTOR(14, 2, 0, 0, 1080000000),
	PLLFACTOR(22, 1, 0, 0, 1104000000),
	PLLFACTOR(23, 1, 0, 0, 1152000000),
	PLLFACTOR(24, 1, 0, 0, 1200000000),
	PLLFACTOR(16, 2, 0, 0, 1224000000),
	PLLFACTOR(25, 1, 0, 0, 1248000000),
	PLLFACTOR(26, 1, 0, 0, 1296000000),
	PLLFACTOR(27, 1, 0, 0, 1344000000),
	PLLFACTOR(18, 2, 0, 0, 1368000000),
	PLLFACTOR(28, 1, 0, 0, 1392000000),
	PLLFACTOR(29, 1, 0, 0, 1440000000),
	PLLFACTOR(20, 2, 0, 0, 1512000000),
	PLLFACTOR(15, 3, 0, 0, 1536000000),
	PLLFACTOR(21, 2, 0, 0, 1584000000),
	PLLFACTOR(16, 3, 0, 0, 1632000000),
	PLLFACTOR(22, 2, 0, 0, 1656000000),
	PLLFACTOR(23, 2, 0, 0, 1728000000),
	PLLFACTOR(24, 2, 0, 0, 1800000000),
	PLLFACTOR(18, 3, 0, 0, 1824000000),
	PLLFACTOR(25, 2, 0, 0, 1872000000),
};
/* H3 audio PLL (PLL2) rates: the two standard audio base clocks. */
static struct aw_pll_factor aw_h3_pll2_factors[] = {
	PLLFACTOR(13, 0, 0, 13, 24576000),
	PLLFACTOR(6, 0, 0, 7, 22579200),
};
/*
 * Identifies the PLL variant; used to index aw_pll_func[] for per-variant
 * recalc/set_freq/init callbacks.  Values start at 1 so that a failed
 * compat_data lookup (ocd_data == 0) is distinguishable in probe.
 */
enum aw_pll_type {
	AWPLL_A10_PLL1 = 1,
	AWPLL_A10_PLL2,
	AWPLL_A10_PLL3,
	AWPLL_A10_PLL5,
	AWPLL_A10_PLL6,
	AWPLL_A13_PLL2,
	AWPLL_A23_PLL1,
	AWPLL_A31_PLL1,
	AWPLL_A31_PLL6,
	AWPLL_A64_PLLHSIC,
	AWPLL_A80_PLL4,
	AWPLL_A83T_PLLCPUX,
	AWPLL_H3_PLL1,
	AWPLL_H3_PLL2,
};
/* Per-clknode softc for a PLL output. */
struct aw_pll_sc {
	enum aw_pll_type type;	/* PLL variant, indexes aw_pll_func[] */
	device_t clkdev;	/* parent device providing register access */
	bus_addr_t reg;		/* PLL control register address */
	int id;			/* output index within this PLL */
};
/* Per-variant operations; any member may be NULL if unsupported. */
struct aw_pll_funcs {
	int (*recalc)(struct aw_pll_sc *, uint64_t *);
	int (*set_freq)(struct aw_pll_sc *, uint64_t, uint64_t *, int);
	int (*init)(device_t, bus_addr_t, struct clknode_init_def *);
};
/* Shorthand register and lock accessors through the parent clock device. */
#define PLL_READ(sc, val) CLKDEV_READ_4((sc)->clkdev, (sc)->reg, (val))
#define PLL_WRITE(sc, val) CLKDEV_WRITE_4((sc)->clkdev, (sc)->reg, (val))
#define DEVICE_LOCK(sc) CLKDEV_DEVICE_LOCK((sc)->clkdev)
#define DEVICE_UNLOCK(sc) CLKDEV_DEVICE_UNLOCK((sc)->clkdev)
/*
 * Program the A10 PLL1 to a supported frequency.  Returns EINVAL if the
 * requested rate is not in the factor table.
 */
static int
a10_pll1_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	const struct aw_pll_factor *fp;
	uint32_t reg;
	int i;

	/* Exact-match lookup of the requested output frequency. */
	fp = NULL;
	for (i = 0; i < nitems(aw_a10_pll1_factors); i++) {
		if (aw_a10_pll1_factors[i].freq == *fout) {
			fp = &aw_a10_pll1_factors[i];
			break;
		}
	}
	if (fp == NULL)
		return (EINVAL);

	/* Nothing to program on a dry run. */
	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~(A10_PLL1_FACTOR_N | A10_PLL1_FACTOR_K | A10_PLL1_FACTOR_M |
	    A10_PLL1_OUT_EXT_DIVP);
	reg |= (fp->p << A10_PLL1_OUT_EXT_DIVP_SHIFT) |
	    (fp->n << A10_PLL1_FACTOR_N_SHIFT) |
	    (fp->k << A10_PLL1_FACTOR_K_SHIFT) |
	    (fp->m << A10_PLL1_FACTOR_M_SHIFT);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * Recalculate the A10 PLL1 output: fout = fin * N * K / (M * P).
 * M and K register fields are stored minus one; P is a power-of-two shift.
 */
static int
a10_pll1_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, divm, divp, muln, mulk;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	divp = 1 << ((reg & A10_PLL1_OUT_EXT_DIVP) >>
	    A10_PLL1_OUT_EXT_DIVP_SHIFT);
	divm = ((reg & A10_PLL1_FACTOR_M) >> A10_PLL1_FACTOR_M_SHIFT) + 1;
	mulk = ((reg & A10_PLL1_FACTOR_K) >> A10_PLL1_FACTOR_K_SHIFT) + 1;
	muln = (reg & A10_PLL1_FACTOR_N) >> A10_PLL1_FACTOR_N_SHIFT;
	/* N == 0 encodes a multiplier of one. */
	if (muln == 0)
		muln = 1;

	*freq = (*freq * muln * mulk) / (divm * divp);

	return (0);
}
/*
 * Recalculate an A10 PLL2 (audio PLL) output.  The base VCO rate is
 * fin * 2 * N / pre_div; each output id applies a further fixed divider.
 */
static int
a10_pll2_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, postdiv, muln, prediv;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	/* Zero field values mean a divider/multiplier of one. */
	postdiv = (reg & A10_PLL2_POST_DIV) >> A10_PLL2_POST_DIV_SHIFT;
	if (postdiv == 0)
		postdiv = 1;
	muln = (reg & A10_PLL2_FACTOR_N) >> A10_PLL2_FACTOR_N_SHIFT;
	if (muln == 0)
		muln = 1;
	prediv = (reg & A10_PLL2_PRE_DIV) >> A10_PLL2_PRE_DIV_SHIFT;
	if (prediv == 0)
		prediv = 1;

	switch (sc->id) {
	case SUN4I_A10_PLL2_1X:
		*freq = (*freq * 2 * muln) / prediv / postdiv / 2;
		break;
	case SUN4I_A10_PLL2_2X:
		*freq = (*freq * 2 * muln) / prediv / 4;
		break;
	case SUN4I_A10_PLL2_4X:
		*freq = (*freq * 2 * muln) / prediv / 2;
		break;
	case SUN4I_A10_PLL2_8X:
		*freq = (*freq * 2 * muln) / prediv;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
/*
 * Set the A10 PLL2-1X rate for audio; only 24.576 and 22.5792 MHz are
 * accepted and only on the 1X output.
 */
static int
a10_pll2_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	uint32_t reg, postdiv, muln, prediv;

	if (sc->id != SUN4I_A10_PLL2_1X)
		return (ENXIO);

	/*
	 * Audio Codec needs PLL2-1X to be either 24576000 or 22579200.
	 *
	 * PLL2-1X output frequency is (48MHz * n) / pre_div / post_div / 2.
	 * To get as close as possible to the desired rate, we use a
	 * pre-divider of 21 and a post-divider of 4. With these values,
	 * a multiplier of 86 or 79 gets us close to the target rates.
	 */
	if (*fout != 24576000 && *fout != 22579200)
		return (EINVAL);

	prediv = 21;
	postdiv = 4;
	muln = (*fout * prediv * postdiv * 2) / (2 * fin);

	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~(A10_PLL2_POST_DIV | A10_PLL2_FACTOR_N | A10_PLL2_PRE_DIV);
	reg |= (postdiv << A10_PLL2_POST_DIV_SHIFT) |
	    (muln << A10_PLL2_FACTOR_N_SHIFT) |
	    (prediv << A10_PLL2_PRE_DIV_SHIFT);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * Recalculate the A10 PLL3 (video PLL) output.  Integer mode yields
 * 3 MHz * M; fractional mode is a fixed 270 or 297 MHz.
 */
static int
a10_pll3_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, mult;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	if ((reg & A10_PLL3_MODE_SEL) != A10_PLL3_MODE_SEL_INT) {
		/* Fractional mode: one of two fixed rates. */
		*freq = ((reg & A10_PLL3_FUNC_SET) == A10_PLL3_FUNC_SET_270MHZ) ?
		    270000000 : 297000000;
	} else {
		/* Integer mode: reference times the M factor. */
		mult = (reg & A10_PLL3_FACTOR_M) >> A10_PLL3_FACTOR_M_SHIFT;
		*freq = A10_PLL3_REF_FREQ * mult;
	}

	return (0);
}
/*
 * Set the A10 PLL3 rate.  297/270 MHz use fractional mode; any other rate
 * is rounded down to a multiple of the 3 MHz reference in integer mode
 * (*fout is updated to the achieved rate).
 */
static int
a10_pll3_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	uint32_t reg, mult, mode, func;

	switch (*fout) {
	case 297000000:
		func = A10_PLL3_FUNC_SET_297MHZ;
		mode = A10_PLL3_MODE_SEL_FRACT;
		mult = 0;
		break;
	case 270000000:
		func = A10_PLL3_FUNC_SET_270MHZ;
		mode = A10_PLL3_MODE_SEL_FRACT;
		mult = 0;
		break;
	default:
		mode = A10_PLL3_MODE_SEL_INT;
		func = 0;
		mult = *fout / A10_PLL3_REF_FREQ;
		*fout = mult * A10_PLL3_REF_FREQ;
		break;
	}

	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~(A10_PLL3_MODE_SEL | A10_PLL3_FUNC_SET | A10_PLL3_FACTOR_M);
	reg |= mode | func | (mult << A10_PLL3_FACTOR_M_SHIFT);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * One-time A10 PLL3 setup: mark the node glitch-free (rate may be changed
 * while enabled) and program the fractional 297 MHz mode.
 */
static int
a10_pll3_init(device_t dev, bus_addr_t reg, struct clknode_init_def *def)
{
	uint32_t regval;

	/* Allow changing PLL frequency while enabled */
	def->flags = CLK_NODE_GLITCH_FREE;

	/* Set PLL to 297MHz */
	CLKDEV_DEVICE_LOCK(dev);
	CLKDEV_READ_4(dev, reg, &regval);
	regval &= ~(A10_PLL3_MODE_SEL | A10_PLL3_FUNC_SET | A10_PLL3_FACTOR_M);
	regval |= A10_PLL3_MODE_SEL_FRACT | A10_PLL3_FUNC_SET_297MHZ;
	CLKDEV_WRITE_4(dev, reg, regval);
	CLKDEV_DEVICE_UNLOCK(dev);

	return (0);
}
/*
 * Recalculate an A10 PLL5 output.  The DDR tap is fin*N*K/M; the "other"
 * tap is fin*N*K/P.  N == 0 is treated as a misconfigured PLL.
 */
static int
a10_pll5_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, divm, divp, muln, mulk;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	divp = 1 << ((reg & A10_PLL5_OUT_EXT_DIVP) >>
	    A10_PLL5_OUT_EXT_DIVP_SHIFT);
	divm = ((reg & A10_PLL5_FACTOR_M) >> A10_PLL5_FACTOR_M_SHIFT) + 1;
	mulk = ((reg & A10_PLL5_FACTOR_K) >> A10_PLL5_FACTOR_K_SHIFT) + 1;
	muln = (reg & A10_PLL5_FACTOR_N) >> A10_PLL5_FACTOR_N_SHIFT;
	if (muln == 0)
		return (ENXIO);

	switch (sc->id) {
	case CLKID_A10_PLL5_DDR:
		*freq = (*freq * muln * mulk) / divm;
		break;
	case CLKID_A10_PLL5_OTHER:
		*freq = (*freq * muln * mulk) / divp;
		break;
	default:
		return (ENXIO);
	}

	return (0);
}
/*
 * One-time A10 PLL6 setup: program the factors for a 100 MHz SATA clock
 * and enable the SATA output, clearing bypass.
 */
static int
a10_pll6_init(device_t dev, bus_addr_t reg, struct clknode_init_def *def)
{
	uint32_t regval, divm, muln, mulk;

	/*
	 * SATA needs PLL6 to be a 100MHz clock.
	 *
	 * The SATA output frequency is (24MHz * n * k) / m / 6.
	 * To get to 100MHz, k & m must be equal and n must be 25.
	 */
	divm = mulk = 0;
	muln = 25;

	CLKDEV_DEVICE_LOCK(dev);
	CLKDEV_READ_4(dev, reg, &regval);
	regval &= ~(A10_PLL6_FACTOR_N | A10_PLL6_FACTOR_K | A10_PLL6_FACTOR_M);
	regval &= ~A10_PLL6_BYPASS_EN;
	regval |= A10_PLL6_SATA_CLK_EN;
	regval |= (muln << A10_PLL6_FACTOR_N_SHIFT) |
	    (mulk << A10_PLL6_FACTOR_K_SHIFT) |
	    (divm << A10_PLL6_FACTOR_M_SHIFT);
	CLKDEV_WRITE_4(dev, reg, regval);
	CLKDEV_DEVICE_UNLOCK(dev);

	return (0);
}
/*
 * Recalculate an A10 PLL6 output from the N/K/M factors; each output id
 * applies its own fixed divider to the fin*N*K product.
 */
static int
a10_pll6_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, divm, mulk, muln;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	divm = ((reg & A10_PLL6_FACTOR_M) >> A10_PLL6_FACTOR_M_SHIFT) + 1;
	mulk = ((reg & A10_PLL6_FACTOR_K) >> A10_PLL6_FACTOR_K_SHIFT) + 1;
	muln = (reg & A10_PLL6_FACTOR_N) >> A10_PLL6_FACTOR_N_SHIFT;
	if (muln == 0)
		return (ENXIO);

	switch (sc->id) {
	case CLKID_A10_PLL6_SATA:
		*freq = (*freq * muln * mulk) / divm / 6;
		break;
	case CLKID_A10_PLL6_OTHER:
		*freq = (*freq * muln * mulk) / 2;
		break;
	case CLKID_A10_PLL6:
		*freq = (*freq * muln * mulk);
		break;
	case CLKID_A10_PLL6_DIV_4:
		*freq = (*freq * muln * mulk) / 4;
		break;
	default:
		return (ENXIO);
	}

	return (0);
}
/*
 * The SATA output of PLL6 is fixed at 100 MHz by a10_pll6_init(); reject
 * any other rate, and any attempt to set a non-SATA output.
 */
static int
a10_pll6_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	if (sc->id != CLKID_A10_PLL6_SATA)
		return (ENXIO);

	/* PLL6 SATA output has been set to 100MHz in a10_pll6_init */
	return (*fout == 100000000 ? 0 : ERANGE);
}
/*
 * Recalculate an A13 PLL2 (audio PLL) output.  Unlike the A10 variant,
 * the post/pre divider fields are stored minus one, so after the +1 they
 * are always >= 1; the defensive zero checks the A10 version needs are
 * unreachable here and have been removed.
 */
static int
a13_pll2_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t val, post_div, n, pre_div;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &val);
	DEVICE_UNLOCK(sc);

	/* Divider fields are minus-one encoded. */
	post_div = ((val & A13_PLL2_POST_DIV) >> A13_PLL2_POST_DIV_SHIFT) + 1;
	pre_div = ((val & A13_PLL2_PRE_DIV) >> A13_PLL2_PRE_DIV_SHIFT) + 1;
	/* N is a plain multiplier; 0 encodes 1. */
	n = (val & A13_PLL2_FACTOR_N) >> A13_PLL2_FACTOR_N_SHIFT;
	if (n == 0)
		n = 1;

	switch (sc->id) {
	case SUN4I_A10_PLL2_1X:
		*freq = (*freq * 2 * n) / pre_div / post_div / 2;
		break;
	case SUN4I_A10_PLL2_2X:
		*freq = (*freq * 2 * n) / pre_div / 4;
		break;
	case SUN4I_A10_PLL2_4X:
		*freq = (*freq * 2 * n) / pre_div / 2;
		break;
	case SUN4I_A10_PLL2_8X:
		*freq = (*freq * 2 * n) / pre_div;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
/*
 * Set the A13 PLL2-1X rate for audio; only 24.576 and 22.5792 MHz are
 * accepted and only on the 1X output.  Divider fields are written
 * minus-one encoded.
 */
static int
a13_pll2_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	uint32_t reg, postdiv, muln, prediv;

	if (sc->id != SUN4I_A10_PLL2_1X)
		return (ENXIO);

	/*
	 * Audio Codec needs PLL2-1X to be either 24576000 or 22579200.
	 *
	 * PLL2-1X output frequency is (48MHz * n) / pre_div / post_div / 2.
	 * To get as close as possible to the desired rate, we use a
	 * pre-divider of 21 and a post-divider of 4. With these values,
	 * a multiplier of 86 or 79 gets us close to the target rates.
	 */
	if (*fout != 24576000 && *fout != 22579200)
		return (EINVAL);

	prediv = 21;
	postdiv = 4;
	muln = (*fout * prediv * postdiv * 2) / (2 * fin);

	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~(A13_PLL2_POST_DIV | A13_PLL2_FACTOR_N | A13_PLL2_PRE_DIV);
	reg |= ((postdiv - 1) << A13_PLL2_POST_DIV_SHIFT) |
	    (muln << A13_PLL2_FACTOR_N_SHIFT) |
	    ((prediv - 1) << A13_PLL2_PRE_DIV_SHIFT);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * Recalculate an H3 PLL2 (audio PLL) output.  All three factor fields are
 * stored minus one; each output id applies its own fixed divider.
 */
static int
h3_pll2_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, postdiv, muln, prediv;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	postdiv = ((reg & H3_PLL2_POST_DIV) >> H3_PLL2_POST_DIV_SHIFT) + 1;
	muln = ((reg & H3_PLL2_FACTOR_N) >> H3_PLL2_FACTOR_N_SHIFT) + 1;
	prediv = ((reg & H3_PLL2_PRE_DIV) >> H3_PLL2_PRE_DIV_SHIFT) + 1;

	switch (sc->id) {
	case SUN4I_A10_PLL2_1X:
		*freq = (*freq * muln) / (prediv * postdiv);
		break;
	case SUN4I_A10_PLL2_2X:
		*freq = (*freq * 2 * muln) / prediv / 4;
		break;
	case SUN4I_A10_PLL2_4X:
		*freq = (*freq * 2 * muln) / prediv / 2;
		break;
	case SUN4I_A10_PLL2_8X:
		*freq = (*freq * 2 * muln) / prediv;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
/*
 * Set the H3 PLL2-1X rate and wait for the PLL to lock.
 *
 * Bug fix: the lock-wait loop previously counted up from 0, but the
 * timeout test afterwards checked "retry == 0".  That reported ETIMEDOUT
 * when the PLL locked on the very first poll and reported success when
 * the loop actually exhausted all retries.  The loop now counts down
 * (matching a31_pll6_init and the awg MII helpers) so retry == 0 really
 * means the lock bit never came up.
 */
static int
h3_pll2_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	struct aw_pll_factor *f;
	uint32_t val;
	int n, error, retry;

	if (sc->id != SUN4I_A10_PLL2_1X)
		return (ENXIO);

	/* Exact-match lookup of the requested output frequency. */
	f = NULL;
	for (n = 0; n < nitems(aw_h3_pll2_factors); n++) {
		if (aw_h3_pll2_factors[n].freq == *fout) {
			f = &aw_h3_pll2_factors[n];
			break;
		}
	}
	if (f == NULL)
		return (EINVAL);
	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &val);
	val &= ~(H3_PLL2_POST_DIV|H3_PLL2_FACTOR_N|H3_PLL2_PRE_DIV);
	val |= (f->p << H3_PLL2_POST_DIV_SHIFT);
	val |= (f->n << H3_PLL2_FACTOR_N_SHIFT);
	val |= (f->m << H3_PLL2_PRE_DIV_SHIFT);
	val |= AW_PLL_ENABLE;
	PLL_WRITE(sc, val);

	/* Wait for lock (up to 1000 * 100us) */
	error = 0;
	for (retry = 1000; retry > 0; retry--) {
		PLL_READ(sc, &val);
		if ((val & H3_PLL2_LOCK) != 0)
			break;
		DELAY(100);
	}
	if (retry == 0)
		error = ETIMEDOUT;
	DEVICE_UNLOCK(sc);

	return (error);
}
/*
 * Program the A23 PLL1 (CPU PLL) to a supported frequency.  Returns
 * EINVAL if the requested rate is not in the factor table.
 */
static int
a23_pll1_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	const struct aw_pll_factor *fp;
	uint32_t reg;
	int i;

	/* Exact-match lookup of the requested output frequency. */
	fp = NULL;
	for (i = 0; i < nitems(aw_a23_pll1_factors); i++) {
		if (aw_a23_pll1_factors[i].freq == *fout) {
			fp = &aw_a23_pll1_factors[i];
			break;
		}
	}
	if (fp == NULL)
		return (EINVAL);

	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~(A23_PLL1_FACTOR_N|A23_PLL1_FACTOR_K|A23_PLL1_FACTOR_M|
	    A23_PLL1_FACTOR_P);
	reg |= (fp->n << A23_PLL1_FACTOR_N_SHIFT) |
	    (fp->k << A23_PLL1_FACTOR_K_SHIFT) |
	    (fp->m << A23_PLL1_FACTOR_M_SHIFT) |
	    (fp->p << A23_PLL1_FACTOR_P_SHIFT);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * Recalculate the A23 PLL1 output: fout = fin * N * K / (M * P).
 * All four factor fields are stored minus one.
 */
static int
a23_pll1_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, divm, divp, muln, mulk;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	divm = ((reg & A23_PLL1_FACTOR_M) >> A23_PLL1_FACTOR_M_SHIFT) + 1;
	mulk = ((reg & A23_PLL1_FACTOR_K) >> A23_PLL1_FACTOR_K_SHIFT) + 1;
	muln = ((reg & A23_PLL1_FACTOR_N) >> A23_PLL1_FACTOR_N_SHIFT) + 1;
	divp = ((reg & A23_PLL1_FACTOR_P) >> A23_PLL1_FACTOR_P_SHIFT) + 1;

	*freq = (*freq * muln * mulk) / (divm * divp);

	return (0);
}
/*
 * Program the H3 PLL1 (CPU PLL) to a supported frequency.
 *
 * The H3 requires a careful sequence when retuning this PLL: dividers
 * that must grow are written first, then N/K, then dividers that must
 * shrink, with a settle delay after every write.
 *
 * Fix: this block contained unresolved diff hunk lines ("-"/"+"
 * prefixes), leaving it syntactically invalid; the revision is applied
 * here (the unused n/k locals and their dead register reads are gone).
 */
static int
h3_pll1_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	struct aw_pll_factor *f;
	uint32_t val, m, p;
	int i;

	/* Exact-match lookup of the requested output frequency. */
	f = NULL;
	for (i = 0; i < nitems(aw_a23_pll1_factors); i++) {
		if (aw_a23_pll1_factors[i].freq == *fout) {
			f = &aw_a23_pll1_factors[i];
			break;
		}
	}
	if (f == NULL)
		return (EINVAL);
	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &val);

	/* Current divider settings. */
	m = (val & A23_PLL1_FACTOR_M) >> A23_PLL1_FACTOR_M_SHIFT;
	p = (val & A23_PLL1_FACTOR_P) >> A23_PLL1_FACTOR_P_SHIFT;

	/* Increase dividers before touching the multipliers. */
	if (p < f->p) {
		val &= ~A23_PLL1_FACTOR_P;
		val |= (f->p << A23_PLL1_FACTOR_P_SHIFT);
		PLL_WRITE(sc, val);
		DELAY(2000);
	}
	if (m < f->m) {
		val &= ~A23_PLL1_FACTOR_M;
		val |= (f->m << A23_PLL1_FACTOR_M_SHIFT);
		PLL_WRITE(sc, val);
		DELAY(2000);
	}

	/* Program the new N and K multipliers. */
	val &= ~(A23_PLL1_FACTOR_N|A23_PLL1_FACTOR_K);
	val |= (f->n << A23_PLL1_FACTOR_N_SHIFT);
	val |= (f->k << A23_PLL1_FACTOR_K_SHIFT);
	PLL_WRITE(sc, val);
	DELAY(2000);

	/* Finally decrease dividers that need to shrink. */
	if (m > f->m) {
		val &= ~A23_PLL1_FACTOR_M;
		val |= (f->m << A23_PLL1_FACTOR_M_SHIFT);
		PLL_WRITE(sc, val);
		DELAY(2000);
	}
	if (p > f->p) {
		val &= ~A23_PLL1_FACTOR_P;
		val |= (f->p << A23_PLL1_FACTOR_P_SHIFT);
		PLL_WRITE(sc, val);
		DELAY(2000);
	}
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * Recalculate the A31 PLL1 output: fout = fin * N * K / M.
 * All three factor fields are stored minus one.
 */
static int
a31_pll1_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, divm, muln, mulk;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	divm = ((reg & A31_PLL1_FACTOR_M) >> A31_PLL1_FACTOR_M_SHIFT) + 1;
	mulk = ((reg & A31_PLL1_FACTOR_K) >> A31_PLL1_FACTOR_K_SHIFT) + 1;
	muln = ((reg & A31_PLL1_FACTOR_N) >> A31_PLL1_FACTOR_N_SHIFT) + 1;

	*freq = (*freq * muln * mulk) / divm;

	return (0);
}
/*
 * One-time A31 PLL6 setup: fix the PLL at its recommended 600 MHz rate
 * and enable it.  A failure to observe the lock bit is not treated as
 * fatal; the function always returns 0.
 */
static int
a31_pll6_init(device_t dev, bus_addr_t reg, struct clknode_init_def *def)
{
	uint32_t regval;
	int retry;

	/* Only the base PLL6 output is initialized here. */
	if (def->id != CLKID_A31_PLL6)
		return (0);

	/*
	 * The datasheet recommends that PLL6 output should be fixed to
	 * 600MHz.
	 */
	CLKDEV_DEVICE_LOCK(dev);
	CLKDEV_READ_4(dev, reg, &regval);
	regval &= ~(A31_PLL6_FACTOR_N | A31_PLL6_FACTOR_K | A31_PLL6_BYPASS_EN);
	regval |= (A31_PLL6_DEFAULT_N << A31_PLL6_FACTOR_N_SHIFT) |
	    (A31_PLL6_DEFAULT_K << A31_PLL6_FACTOR_K_SHIFT) |
	    AW_PLL_ENABLE;
	CLKDEV_WRITE_4(dev, reg, regval);

	/* Wait for PLL to become stable */
	for (retry = A31_PLL6_TIMEOUT; retry > 0; retry--) {
		CLKDEV_READ_4(dev, reg, &regval);
		if ((regval & A31_PLL6_LOCK) == A31_PLL6_LOCK)
			break;
		DELAY(1);
	}
	CLKDEV_DEVICE_UNLOCK(dev);

	return (0);
}
/*
 * Recalculate an A31 PLL6 output.  N and K fields are stored minus one.
 * NOTE(review): this intentionally reuses the A10_PLL6 field masks —
 * presumably the register layout matches; confirm against the A31
 * user manual.
 */
static int
a31_pll6_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, mulk, muln;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	mulk = ((reg & A10_PLL6_FACTOR_K) >> A10_PLL6_FACTOR_K_SHIFT) + 1;
	muln = ((reg & A10_PLL6_FACTOR_N) >> A10_PLL6_FACTOR_N_SHIFT) + 1;

	switch (sc->id) {
	case CLKID_A31_PLL6:
		*freq = (*freq * muln * mulk) / 2;
		break;
	case CLKID_A31_PLL6_X2:
		*freq = *freq * muln * mulk;
		break;
	default:
		return (ENXIO);
	}

	return (0);
}
/*
 * Recalculate the A80 PLL4 output: fout = fin * N / div1 / div2,
 * where each div bit selects a divide-by-two.
 */
static int
a80_pll4_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, muln, d1, d2;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	muln = (reg & A80_PLL4_FACTOR_N) >> A80_PLL4_FACTOR_N_SHIFT;
	d1 = (reg & A80_PLL4_PLL_DIV1) != 0 ? 2 : 1;
	d2 = (reg & A80_PLL4_PLL_DIV2) != 0 ? 2 : 1;

	*freq = (*freq * muln) / d1 / d2;

	return (0);
}
/*
 * Recalculate the A64 PLL_HSIC output: fout = fin * N / M.
 * Both fields are stored minus one.
 */
static int
a64_pllhsic_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, muln, divm;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	muln = ((reg & A64_PLLHSIC_FACTOR_N) >> A64_PLLHSIC_FACTOR_N_SHIFT) + 1;
	divm = ((reg & A64_PLLHSIC_PRE_DIV_M) >> A64_PLLHSIC_PRE_DIV_M_SHIFT) + 1;

	*freq = (*freq * muln) / divm;

	return (0);
}
/*
 * One-time A64 PLL_HSIC setup: the reset default already yields 480 MHz,
 * so only the enable bit needs to be set.
 */
static int
a64_pllhsic_init(device_t dev, bus_addr_t reg, struct clknode_init_def *def)
{
	uint32_t regval;

	/*
	 * PLL_HSIC default is 480MHz, just enable it.
	 */
	CLKDEV_DEVICE_LOCK(dev);
	CLKDEV_READ_4(dev, reg, &regval);
	regval |= AW_PLL_ENABLE;
	CLKDEV_WRITE_4(dev, reg, regval);
	CLKDEV_DEVICE_UNLOCK(dev);

	return (0);
}
/*
 * Recalculate the A83T PLL_CPUX output: fout = fin * N / P, where the
 * external divider bit selects P = 4 (set) or P = 1 (clear).
 */
static int
a83t_pllcpux_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
	uint32_t reg, muln, divp;

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	DEVICE_UNLOCK(sc);

	muln = (reg & A83T_PLLCPUX_FACTOR_N) >> A83T_PLLCPUX_FACTOR_N_SHIFT;
	divp = (reg & A83T_PLLCPUX_OUT_EXT_DIVP) != 0 ? 4 : 1;

	*freq = (*freq * muln) / divp;

	return (0);
}
/*
 * Set the A83T PLL_CPUX rate by programming N = fout / fin; N must fall
 * within the documented [12, 125] range.  Also clears the output-disable
 * bit so the clock is driven.
 */
static int
a83t_pllcpux_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
    int flags)
{
	uint32_t reg;
	u_int muln;

	muln = *fout / fin;

	if (muln < A83T_PLLCPUX_FACTOR_N_MIN || muln > A83T_PLLCPUX_FACTOR_N_MAX)
		return (EINVAL);

	if ((flags & CLK_SET_DRYRUN) != 0)
		return (0);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg &= ~A83T_PLLCPUX_FACTOR_N;
	reg |= (muln << A83T_PLLCPUX_FACTOR_N_SHIFT);
	reg &= ~A83T_PLLCPUX_CLOCK_OUTPUT_DIS;
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/* Designated-initializer helper for the aw_pll_func dispatch table. */
#define PLL(_type, _recalc, _set_freq, _init)	\
	[(_type)] = {				\
		.recalc = (_recalc),		\
		.set_freq = (_set_freq),	\
		.init = (_init)			\
	}

/* Per-variant operation table, indexed by enum aw_pll_type. */
static struct aw_pll_funcs aw_pll_func[] = {
	PLL(AWPLL_A10_PLL1, a10_pll1_recalc, a10_pll1_set_freq, NULL),
	PLL(AWPLL_A10_PLL2, a10_pll2_recalc, a10_pll2_set_freq, NULL),
	PLL(AWPLL_A10_PLL3, a10_pll3_recalc, a10_pll3_set_freq, a10_pll3_init),
	PLL(AWPLL_A10_PLL5, a10_pll5_recalc, NULL, NULL),
	PLL(AWPLL_A10_PLL6, a10_pll6_recalc, a10_pll6_set_freq, a10_pll6_init),
	PLL(AWPLL_A13_PLL2, a13_pll2_recalc, a13_pll2_set_freq, NULL),
	PLL(AWPLL_A23_PLL1, a23_pll1_recalc, a23_pll1_set_freq, NULL),
	PLL(AWPLL_A31_PLL1, a31_pll1_recalc, NULL, NULL),
	PLL(AWPLL_A31_PLL6, a31_pll6_recalc, NULL, a31_pll6_init),
	PLL(AWPLL_A80_PLL4, a80_pll4_recalc, NULL, NULL),
	PLL(AWPLL_A83T_PLLCPUX, a83t_pllcpux_recalc, a83t_pllcpux_set_freq, NULL),
	PLL(AWPLL_A64_PLLHSIC, a64_pllhsic_recalc, NULL, a64_pllhsic_init),
	/* H3 PLL1 shares the A23 PLL1 register layout for recalc. */
	PLL(AWPLL_H3_PLL1, a23_pll1_recalc, h3_pll1_set_freq, NULL),
	PLL(AWPLL_H3_PLL2, h3_pll2_recalc, h3_pll2_set_freq, NULL),
};
/* FDT compatible strings mapped to PLL variants (ocd_data is the type). */
static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun4i-a10-pll1-clk",	AWPLL_A10_PLL1 },
	{ "allwinner,sun4i-a10-pll2-clk",	AWPLL_A10_PLL2 },
	{ "allwinner,sun4i-a10-pll3-clk",	AWPLL_A10_PLL3 },
	{ "allwinner,sun4i-a10-pll5-clk",	AWPLL_A10_PLL5 },
	{ "allwinner,sun4i-a10-pll6-clk",	AWPLL_A10_PLL6 },
	{ "allwinner,sun5i-a13-pll2-clk",	AWPLL_A13_PLL2 },
	{ "allwinner,sun6i-a31-pll1-clk",	AWPLL_A31_PLL1 },
	{ "allwinner,sun6i-a31-pll6-clk",	AWPLL_A31_PLL6 },
	{ "allwinner,sun8i-a23-pll1-clk",	AWPLL_A23_PLL1 },
	{ "allwinner,sun8i-a83t-pllcpux-clk",	AWPLL_A83T_PLLCPUX },
	{ "allwinner,sun8i-h3-pll1-clk",	AWPLL_H3_PLL1 },
	{ "allwinner,sun8i-h3-pll2-clk",	AWPLL_H3_PLL2 },
	{ "allwinner,sun9i-a80-pll4-clk",	AWPLL_A80_PLL4 },
	{ "allwinner,sun50i-a64-pllhsic-clk",	AWPLL_A64_PLLHSIC },
	{ NULL, 0 }
};
/* clknode init method: every PLL node has a single (index 0) parent. */
static int
aw_pll_init(struct clknode *clk, device_t dev)
{
	clknode_init_parent_idx(clk, 0);
	return (0);
}
/*
 * clknode gate method: set or clear the common PLL enable bit.
 */
static int
aw_pll_set_gate(struct clknode *clk, bool enable)
{
	struct aw_pll_sc *sc;
	uint32_t reg;

	sc = clknode_get_softc(clk);

	DEVICE_LOCK(sc);
	PLL_READ(sc, &reg);
	reg = enable ? (reg | AW_PLL_ENABLE) : (reg & ~AW_PLL_ENABLE);
	PLL_WRITE(sc, reg);
	DEVICE_UNLOCK(sc);

	return (0);
}
/*
 * clknode recalc method: dispatch to the variant-specific recalc hook,
 * or report ENXIO when the variant has none.
 */
static int
aw_pll_recalc(struct clknode *clk, uint64_t *freq)
{
	struct aw_pll_sc *sc;
	int (*recalc)(struct aw_pll_sc *, uint64_t *);

	sc = clknode_get_softc(clk);
	recalc = aw_pll_func[sc->type].recalc;
	if (recalc == NULL)
		return (ENXIO);

	return (recalc(sc, freq));
}
/*
 * clknode set_freq method: dispatch to the variant-specific set_freq
 * hook.  *stop is always set — rate requests never propagate upstream
 * of a PLL node.
 */
static int
aw_pll_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
    int flags, int *stop)
{
	struct aw_pll_sc *sc;
	int (*set_freq)(struct aw_pll_sc *, uint64_t, uint64_t *, int);

	sc = clknode_get_softc(clk);
	*stop = 1;

	set_freq = aw_pll_func[sc->type].set_freq;
	if (set_freq == NULL)
		return (ENXIO);

	return (set_freq(sc, fin, fout, flags));
}
/* clknode method table: init, gate, recalc and set_freq are supported. */
static clknode_method_t aw_pll_clknode_methods[] = {
	/* Device interface */
	CLKNODEMETHOD(clknode_init,		aw_pll_init),
	CLKNODEMETHOD(clknode_set_gate,		aw_pll_set_gate),
	CLKNODEMETHOD(clknode_recalc_freq,	aw_pll_recalc),
	CLKNODEMETHOD(clknode_set_freq,		aw_pll_set_freq),
	CLKNODEMETHOD_END
};
/* Subclass of the generic clknode class carrying struct aw_pll_sc. */
DEFINE_CLASS_1(aw_pll_clknode, aw_pll_clknode_class, aw_pll_clknode_methods,
    sizeof(struct aw_pll_sc), clknode_class);
/*
 * Create and register one PLL clknode.
 *
 * Fix: the parent_names array allocated for clkdef was leaked on the
 * two error paths (variant init failure and clknode_create failure);
 * all exits now funnel through a common cleanup that frees it.
 * OF_prop_free() tolerates NULL (free(9) semantics), so the no-parent
 * case needs no special handling.
 */
static int
aw_pll_create(device_t dev, bus_addr_t paddr, struct clkdom *clkdom,
    const char *pclkname, const char *clkname, int index)
{
	enum aw_pll_type type;
	struct clknode_init_def clkdef;
	struct aw_pll_sc *sc;
	struct clknode *clk;
	int error;

	type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	memset(&clkdef, 0, sizeof(clkdef));
	clkdef.id = index;
	clkdef.name = clkname;
	if (pclkname != NULL) {
		clkdef.parent_names = malloc(sizeof(char *), M_OFWPROP,
		    M_WAITOK);
		clkdef.parent_names[0] = pclkname;
		clkdef.parent_cnt = 1;
	} else
		clkdef.parent_cnt = 0;

	error = 0;
	if (aw_pll_func[type].init != NULL) {
		error = aw_pll_func[type].init(device_get_parent(dev),
		    paddr, &clkdef);
		if (error != 0) {
			device_printf(dev, "clock %s init failed\n", clkname);
			goto done;
		}
	}

	clk = clknode_create(clkdom, &aw_pll_clknode_class, &clkdef);
	if (clk == NULL) {
		device_printf(dev, "cannot create clock node\n");
		error = ENXIO;
		goto done;
	}
	sc = clknode_get_softc(clk);
	sc->clkdev = device_get_parent(dev);
	sc->reg = paddr;
	sc->type = type;
	sc->id = clkdef.id;

	clknode_register(clkdom, clk);

done:
	OF_prop_free(__DECONST(char *, clkdef.parent_names));

	return (error);
}
/*
 * Newbus probe: match on the FDT status and one of the supported
 * Allwinner PLL compatible strings.
 */
static int
aw_pll_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev) ||
	    ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner PLL Clock");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Newbus attach: parse the register address and the FDT output names,
 * create one clknode per declared output, then finalize the clkdom.
 *
 * NOTE(review): on the fail path only the error is returned — the
 * clkdom and the names/indices arrays from clk_parse_ofw_out_names()
 * are not released here; presumably ownership rests with the clk
 * framework — confirm before changing.
 */
static int
aw_pll_attach(device_t dev)
{
	struct clkdom *clkdom;
	const char **names;
	int index, nout, error;
	clk_t clk_parent;
	uint32_t *indices;
	bus_addr_t paddr;
	bus_size_t psize;
	phandle_t node;

	node = ofw_bus_get_node(dev);
	if (ofw_reg_to_paddr(node, 0, &paddr, &psize, NULL) != 0) {
		device_printf(dev, "couldn't parse 'reg' property\n");
		return (ENXIO);
	}

	clkdom = clkdom_create(dev);

	nout = clk_parse_ofw_out_names(dev, node, &names, &indices);
	if (nout == 0) {
		device_printf(dev, "no clock outputs found\n");
		error = ENOENT;
		goto fail;
	}

	/* An absent input clock is tolerated (root PLL). */
	if (clk_get_by_ofw_index(dev, 0, 0, &clk_parent) != 0)
		clk_parent = NULL;

	for (index = 0; index < nout; index++) {
		/* Single-output PLLs use id 1; multi-output use the index. */
		error = aw_pll_create(dev, paddr, clkdom,
		    clk_parent ? clk_get_name(clk_parent) : NULL,
		    names[index], nout == 1 ? 1 : index);
		if (error)
			goto fail;
	}

	if (clkdom_finit(clkdom) != 0) {
		device_printf(dev, "cannot finalize clkdom initialization\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		clkdom_dump(clkdom);

	return (0);

fail:
	return (error);
}
/* Newbus glue: probe/attach only; clocks are torn down with the domain. */
static device_method_t aw_pll_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_pll_probe),
	DEVMETHOD(device_attach,	aw_pll_attach),

	DEVMETHOD_END
};

static driver_t aw_pll_driver = {
	"aw_pll",
	aw_pll_methods,
	0,	/* softc lives in the clknodes, not the device */
};

static devclass_t aw_pll_devclass;

/* Attach early (BUS pass) so dependent device clocks resolve. */
EARLY_DRIVER_MODULE(aw_pll, simplebus, aw_pll_driver,
    aw_pll_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
Index: head/sys/arm/allwinner/if_awg.c
===================================================================
--- head/sys/arm/allwinner/if_awg.c (revision 327172)
+++ head/sys/arm/allwinner/if_awg.c (revision 327173)
@@ -1,1816 +1,1812 @@
/*-
* Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Allwinner Gigabit Ethernet MAC (EMAC) controller
*/
#include "opt_device_polling.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <arm/allwinner/if_awgreg.h>
#include <arm/allwinner/aw_sid.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>
#include "miibus_if.h"
#include "gpio_if.h"
#define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg))
#define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val))
#define AWG_LOCK(sc) mtx_lock(&(sc)->mtx)
#define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx);
#define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
#define DESC_ALIGN 4
#define TX_DESC_COUNT 1024
#define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT)
#define RX_DESC_COUNT 256
#define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT)
#define DESC_OFF(n) ((n) * sizeof(struct emac_desc))
#define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1))
#define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1))
#define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1))
#define TX_MAX_SEGS 20
#define SOFT_RST_RETRY 1000
#define MII_BUSY_RETRY 1000
#define MDIO_FREQ 2500000
#define BURST_LEN_DEFAULT 8
#define RX_TX_PRI_DEFAULT 0
#define PAUSE_TIME_DEFAULT 0x400
#define TX_INTERVAL_DEFAULT 64
#define RX_BATCH_DEFAULT 64
/* syscon EMAC clock register */
#define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */
#define EMAC_CLK_EPHY_ADDR_SHIFT 20
#define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */
#define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */
#define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */
#define EMAC_CLK_RMII_EN (1 << 13)
#define EMAC_CLK_ETXDC (0x7 << 10)
#define EMAC_CLK_ETXDC_SHIFT 10
#define EMAC_CLK_ERXDC (0x1f << 5)
#define EMAC_CLK_ERXDC_SHIFT 5
#define EMAC_CLK_PIT (0x1 << 2)
#define EMAC_CLK_PIT_MII (0 << 2)
#define EMAC_CLK_PIT_RGMII (1 << 2)
#define EMAC_CLK_SRC (0x3 << 0)
#define EMAC_CLK_SRC_MII (0 << 0)
#define EMAC_CLK_SRC_EXT_RGMII (1 << 0)
#define EMAC_CLK_SRC_RGMII (2 << 0)
/* Burst length of RX and TX DMA transfers */
static int awg_burst_len = BURST_LEN_DEFAULT;
TUNABLE_INT("hw.awg.burst_len", &awg_burst_len);
/* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT;
TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri);
/* Pause time field in the transmitted control frame */
static int awg_pause_time = PAUSE_TIME_DEFAULT;
TUNABLE_INT("hw.awg.pause_time", &awg_pause_time);
/* Request a TX interrupt every <n> descriptors */
static int awg_tx_interval = TX_INTERVAL_DEFAULT;
TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval);
/* Maximum number of mbufs to send to if_input */
static int awg_rx_batch = RX_BATCH_DEFAULT;
TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch);
/* Supported EMAC hardware generations; value 0 is reserved for "no match". */
enum awg_type {
	EMAC_A83T = 1,
	EMAC_H3,
	EMAC_A64,
};

/* FDT compatible strings mapped to EMAC generations. */
static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun8i-a83t-emac",		EMAC_A83T },
	{ "allwinner,sun8i-h3-emac",		EMAC_H3 },
	{ "allwinner,sun50i-a64-emac",		EMAC_A64 },
	{ NULL, 0 }
};
/* One DMA-mapped mbuf slot in a descriptor ring. */
struct awg_bufmap {
	bus_dmamap_t		map;	/* DMA map for the mbuf data */
	struct mbuf		*mbuf;	/* mbuf currently owning the slot */
};

/* Transmit descriptor ring state. */
struct awg_txring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;	/* TX_DESC_COUNT descriptors */
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[TX_DESC_COUNT];
	u_int			cur, next, queued;	/* producer/consumer state */
	u_int			segs;
};

/* Receive descriptor ring state. */
struct awg_rxring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;	/* RX_DESC_COUNT descriptors */
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[RX_DESC_COUNT];
	bus_dmamap_t		buf_spare_map;	/* spare map for buffer swap */
	u_int			cur;		/* next descriptor to process */
};

/* Indices into awg_softc res[] (matches awg_spec order). */
enum {
	_RES_EMAC,
	_RES_IRQ,
	_RES_SYSCON,
	_RES_NITEMS
};

/* Per-interface driver state. */
struct awg_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;		/* protects all ring state */
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	struct callout		stat_ch;	/* periodic tick callout */
	struct task		link_task;	/* deferred link-change work */
	void			*ih;		/* interrupt handle */
	u_int			mdc_div_ratio_m; /* MDC clock divider field */
	int			link;		/* 1 when a usable link is up */
	int			if_flags;	/* cached if_flags for ioctl */
	enum awg_type		type;
	struct awg_txring	tx;
	struct awg_rxring	rx;
};

/* Bus resources: EMAC registers, IRQ, and an optional syscon window. */
static struct resource_spec awg_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};
static void awg_txeof(struct awg_softc *sc);
/*
 * MII read: start a transaction on the MDIO bus and poll until the
 * controller clears the busy bit.  Returns 0 on timeout (after logging).
 */
static int
awg_miibus_readreg(device_t dev, int phy, int reg)
{
	struct awg_softc *sc;
	uint32_t cmd;
	int tries, val;

	sc = device_get_softc(dev);

	/* Kick off the read transaction. */
	cmd = (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_BUSY;
	WR4(sc, EMAC_MII_CMD, cmd);

	/* Poll for completion. */
	val = 0;
	for (tries = MII_BUSY_RETRY; tries > 0; tries--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
			val = RD4(sc, EMAC_MII_DATA);
			break;
		}
		DELAY(10);
	}

	if (tries == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}
/*
 * MII write: latch the data word, issue a write command, and poll
 * until MII_BUSY self-clears.  Always returns 0 (miibus convention),
 * logging on timeout.
 */
static int
awg_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, EMAC_MII_DATA, val);
	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_WR | MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
			break;
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}
/*
 * Propagate the current MII link state into the MAC: records link
 * up/down in sc->link and, when up, programs speed, duplex and
 * RX/TX flow control.  Caller must hold the softc lock.
 */
static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		/* Only media types the MAC supports count as link-up. */
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	/* Program MAC speed and duplex to match the negotiated media. */
	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	/* Honour negotiated RX pause. */
	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	/* Honour negotiated TX pause; pause time only applies at FDX. */
	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}
/* Taskqueue handler: refresh the MAC link state under the softc lock. */
static void
awg_link_task(void *arg, int pending)
{
	struct awg_softc *sc = arg;

	AWG_LOCK(sc);
	awg_update_link_locked(sc);
	AWG_UNLOCK(sc);
}
/*
 * MII status-change callback.  Runs in MII context, so defer the
 * actual MAC reprogramming to a taskqueue (awg_link_task).
 */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}
/* ifmedia status callback: poll the PHY and report media state. */
static void
awg_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct awg_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AWG_UNLOCK(sc);
}
/* ifmedia change callback: apply the selected media via miibus. */
static int
awg_media_change(if_t ifp)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	error = mii_mediachg(mii);
	AWG_UNLOCK(sc);

	return (error);
}
/*
 * Map the mbuf chain *mp onto the TX descriptor ring starting at
 * sc->tx.cur.  On success the chain becomes owned by the ring (freed
 * later in awg_clean_txbuf()).  On ENOBUFS the caller keeps ownership
 * so it can requeue; on any other error the chain is freed and *mp is
 * set to NULL.
 */
static int
awg_encap(struct awg_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, last, i;
	u_int csum_flags;
	uint32_t flags, status;
	struct mbuf *m;

	cur = first = sc->tx.cur;
	map = sc->tx.buf_map[first].map;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev, "awg_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Not enough free descriptors: let the caller requeue. */
	if (sc->tx.queued + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->tx.buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	status = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		/* Full (IP + L4) vs. IP-header-only checksum offload. */
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (i = 0; i < nsegs; i++) {
		sc->tx.segs++;
		if (i == nsegs - 1) {
			flags |= TX_LAST_DESC;
			/*
			 * Can only request TX completion
			 * interrupt on last descriptor.
			 */
			if (sc->tx.segs >= awg_tx_interval) {
				sc->tx.segs = 0;
				flags |= TX_INT_CTL;
			}
		}

		sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr);
		sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len);
		sc->tx.desc_ring[cur].status = htole32(status);

		flags &= ~TX_FIR_DESC;
		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully set up.
		 */
		status = TX_DESC_CTL;

		++sc->tx.queued;
		cur = TX_NEXT(cur);
	}

	sc->tx.cur = cur;

	/* Store mapping and mbuf in the last segment */
	last = TX_SKIP(cur, TX_DESC_COUNT - 1);
	sc->tx.buf_map[first].map = sc->tx.buf_map[last].map;
	sc->tx.buf_map[last].map = map;
	sc->tx.buf_map[last].mbuf = m;

	/*
	 * The whole mbuf chain has been DMA mapped,
	 * fix the first descriptor.
	 */
	sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL);

	return (0);
}
/*
 * Release one completed TX slot: decrement the queued count and, if
 * an mbuf was attached (the last segment of a chain), sync, unload
 * and free it.
 */
static void
awg_clean_txbuf(struct awg_softc *sc, int index)
{
	struct awg_bufmap *bmap;

	--sc->tx.queued;

	bmap = &sc->tx.buf_map[index];
	if (bmap->mbuf != NULL) {
		bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
	}
}
/*
 * Initialize one RX descriptor with a fresh buffer address and hand
 * it to the hardware (RX_DESC_CTL marks it hardware-owned).
 */
static void
awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
{
	struct emac_desc *desc = &sc->rx.desc_ring[index];

	desc->addr = htole32((uint32_t)paddr);
	desc->size = htole32(MCLBYTES - 1);
	desc->status = htole32(RX_DESC_CTL);
}
/*
 * Hand an RX descriptor back to the hardware unchanged, reusing the
 * buffer already attached to it (e.g. after an RX error or mbuf
 * allocation failure).
 */
static void
awg_reuse_rxdesc(struct awg_softc *sc, int index)
{

	sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL);
}
/*
 * Attach a fresh mbuf cluster to RX slot 'index'.  The new buffer is
 * loaded into the spare DMA map first so the old buffer stays valid
 * if allocation or mapping fails; on success the maps are swapped.
 * Returns 0 or ENOBUFS.
 */
static int
awg_newbuf_rx(struct awg_softc *sc, int index)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	/* Keep the IP header 32-bit aligned within the cluster. */
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map,
	    m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Tear down the old buffer's mapping, if any. */
	if (sc->rx.buf_map[index].mbuf != NULL) {
		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);
	}
	/* Swap the slot's map with the (now loaded) spare map. */
	map = sc->rx.buf_map[index].map;
	sc->rx.buf_map[index].map = sc->rx.buf_spare_map;
	sc->rx.buf_spare_map = map;
	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);
	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}
/*
 * Drain the interface send queue into the TX ring and kick the TX
 * DMA engine if anything was enqueued.  Caller must hold the softc
 * lock; a no-op while the link is down or the interface inactive.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, err;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = awg_encap(sc, &m);
		if (err != 0) {
			/* ENOBUFS: ring full, mark OACTIVE and requeue. */
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}
/* if_start entry point: take the softc lock and drain the send queue. */
static void
awg_start(if_t ifp)
{
	struct awg_softc *sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}
/*
 * Periodic (1 Hz) callout: tick the MII state machine and, if the
 * link just came up, restart transmission.  Runs with the softc
 * lock held (callout was initialized with the mutex).
 */
static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Detect a down -> up link transition across mii_tick(). */
	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
/*
 * Reverse the bit order of a 32-bit word (bit 0 <-> bit 31).
 * Used when computing indices into the multicast hash registers.
 */
static uint32_t
bitrev32(uint32_t x)
{
	uint32_t r;
	int i;

	r = 0;
	for (i = 0; i < 32; i++) {
		r = (r << 1) | (x & 1);
		x >>= 1;
	}
	return (r);
}
/*
 * Program the RX frame filter: promiscuous / all-multicast modes,
 * the 64-bit multicast hash table, and the unicast station address.
 * Caller must hold the softc lock.
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	int mc_count, mcnt, i;
	uint8_t *eaddr, *mta;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	mc_count = if_multiaddr_count(ifp, -1);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (mc_count > 0) {
		val |= HASH_MULTICAST;

		/* Snapshot the multicast list into a temporary array. */
		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
		    M_DEVBUF, M_NOWAIT);
		if (mta == NULL) {
			if_printf(ifp,
			    "failed to allocate temporary multicast list\n");
			return;
		}

		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
		for (i = 0; i < mcnt; i++) {
			/* Hash bit index is derived from the frame CRC. */
			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
			    ETHER_ADDR_LEN) & 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
		}

		free(mta, M_DEVBUF);
	}

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	   (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}
/* Enable the RX, TX and TX-buffer-unavailable interrupt sources. */
static void
awg_enable_intr(struct awg_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}
/* Mask all EMAC interrupt sources (used for stop and polling mode). */
static void
awg_disable_intr(struct awg_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}
/*
 * Bring the interface up: program the RX filter and DMA arbitration,
 * enable interrupts (unless polling), then enable the DMA engines and
 * the MAC transmitter/receiver.  Caller must hold the softc lock;
 * a no-op if already running.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		awg_enable_intr(sc);
	else
		awg_disable_intr(sc);
#else
	awg_enable_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
/* if_init entry point: take the softc lock and bring the interface up. */
static void
awg_init(void *softc)
{
	struct awg_softc *sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}
/*
 * Bring the interface down: stop the MAC and both DMA engines,
 * reap completed TX buffers, free any still-pending TX mbufs, and
 * rearm all RX descriptors for the next awg_init().  Caller must
 * hold the softc lock.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;
	int i;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	awg_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	sc->link = 0;

	/* Finish handling transmitted buffers */
	awg_txeof(sc);

	/* Release any untransmitted buffers. */
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		val = le32toh(sc->tx.desc_ring[i].status);
		if ((val & TX_DESC_CTL) != 0)
			break;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.next = i;
	/* Anything still hardware-owned is abandoned: clear and free. */
	for (; sc->tx.queued > 0; i = TX_NEXT(i)) {
		sc->tx.desc_ring[i].status = 0;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.cur = sc->tx.next;
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Setup RX buffers for reuse */
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Hand every software-owned RX descriptor back to the HW. */
	for (i = sc->rx.cur; ; i = RX_NEXT(i)) {
		val = le32toh(sc->rx.desc_ring[i].status);
		if ((val & RX_DESC_CTL) != 0)
			break;
		awg_reuse_rxdesc(sc, i);
	}
	sc->rx.cur = i;
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
/*
 * RX completion: walk software-owned descriptors from sc->rx.cur,
 * replace each received buffer, set checksum-offload results, and
 * hand packets to if_input() in batches of awg_rx_batch (the lock is
 * dropped around if_input).  Returns the number of packets received.
 */
static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		if ((status & RX_DESC_CTL) != 0)
			break;

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;

		if (len == 0) {
			if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m = sc->rx.buf_map[index].mbuf;

		/* Replace the buffer first; on failure drop this frame. */
		error = awg_newbuf_rx(sc, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* Translate hardware checksum status into mbuf flags. */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (status & RX_FRM_TYPE) != 0) {
			m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			if ((status & RX_HEADER_ERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & RX_PAYLOAD_ERR) == 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Append to the local packet chain. */
		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		if (cnt == awg_rx_batch) {
			AWG_UNLOCK(sc);
			if_input(ifp, mh);
			AWG_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Flush any remaining packets in the final partial batch. */
	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}
/*
 * TX completion: reap descriptors from sc->tx.next that the hardware
 * has released (TX_DESC_CTL clear), updating packet/error counters on
 * chain-final descriptors and freeing the associated mbufs.  Clears
 * OACTIVE when progress was made.  Caller must hold the softc lock.
 */
static void
awg_txeof(struct awg_softc *sc)
{
	struct emac_desc *desc;
	uint32_t status, size;
	if_t ifp;
	int i, prog;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;

	prog = 0;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		if ((status & TX_DESC_CTL) != 0)
			break;
		size = le32toh(desc->size);
		if (size & TX_LAST_DESC) {
			/* Count one packet per chain, on its last segment. */
			if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		prog++;
		awg_clean_txbuf(sc, i);
	}

	if (prog > 0) {
		sc->tx.next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
}
/*
 * Interrupt handler: read and acknowledge the status register, then
 * service RX completions, TX completions, and restart transmission
 * if the send queue has pending work.
 */
static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	/* Writing the status back acknowledges the pending sources. */
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & TX_INT)
		awg_txeof(sc);

	if (val & (TX_INT | TX_BUF_UA_INT)) {
		if (!if_sendq_empty(sc->ifp))
			awg_start_locked(sc);
	}

	AWG_UNLOCK(sc);
}
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING handler: service RX and TX, restart the send queue,
 * and (on POLL_AND_CHECK_STATUS) acknowledge any latched interrupt
 * status.  Returns the number of packets received.
 */
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txeof(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif
/*
 * ioctl entry point: handles interface flag changes, multicast list
 * updates, media selection, and capability toggles (polling, VLAN
 * MTU, RX/TX checksum offload); everything else goes to ether_ioctl().
 */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only re-filter on PROMISC/ALLMULTI changes. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				/* Polling replaces interrupts entirely. */
				awg_disable_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		/* Keep if_hwassist consistent with the TXCSUM capability. */
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Configure the MAC-to-PHY interface (MII/RMII/RGMII) according to
 * the FDT "phy-mode" property, either through the syscon clock
 * register (when present) or through the TX clock mux.  On H3, also
 * selects and configures the internal EPHY when requested.
 * Returns 0 on success or an errno value.
 */
static int
awg_setup_phy(device_t dev)
{
	struct awg_softc *sc;
	clk_t clk_tx, clk_tx_parent;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint32_t reg, tx_delay, rx_delay;
	int error;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	/* No phy-mode property: nothing to configure. */
	if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type) == 0)
		return (0);

	if (bootverbose)
		device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type,
		    sc->res[_RES_SYSCON] != NULL ? "reg" : "clk");

	if (sc->res[_RES_SYSCON] != NULL) {
		reg = bus_read_4(sc->res[_RES_SYSCON], 0);
		reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
		if (strcmp(phy_type, "rgmii") == 0)
			reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
		else if (strcmp(phy_type, "rmii") == 0)
			reg |= EMAC_CLK_RMII_EN;
		else
			reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

		/* Optional board-specific TX/RX line delays. */
		if (OF_getencprop(node, "tx-delay", &tx_delay,
		    sizeof(tx_delay)) > 0) {
			reg &= ~EMAC_CLK_ETXDC;
			reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
		}
		if (OF_getencprop(node, "rx-delay", &rx_delay,
		    sizeof(rx_delay)) > 0) {
			reg &= ~EMAC_CLK_ERXDC;
			reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);
		}

		if (sc->type == EMAC_H3) {
			if (OF_hasprop(node, "allwinner,use-internal-phy")) {
				reg |= EMAC_CLK_EPHY_SELECT;
				reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
				if (OF_hasprop(node,
				    "allwinner,leds-active-low"))
					reg |= EMAC_CLK_EPHY_LED_POL;
				else
					reg &= ~EMAC_CLK_EPHY_LED_POL;

				/* Set internal PHY addr to 1 */
				reg &= ~EMAC_CLK_EPHY_ADDR;
				reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
			} else {
				reg &= ~EMAC_CLK_EPHY_SELECT;
			}
		}

		if (bootverbose)
			device_printf(dev, "EMAC clock: 0x%08x\n", reg);
		bus_write_4(sc->res[_RES_SYSCON], 0, reg);
	} else {
		if (strcmp(phy_type, "rgmii") == 0)
			tx_parent_name = "emac_int_tx";
		else
			tx_parent_name = "mii_phy_tx";

		/* Get the TX clock */
		error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot get tx clock\n");
			goto fail;
		}

		/* Find the desired parent clock based on phy-mode property */
		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;

fail:
	OF_prop_free(phy_type);
	return (error);
}
/*
 * Acquire and enable external resources: AHB (and optional EPHY)
 * clocks and resets, the PHY-mode configuration, and the optional
 * PHY supply regulator; then derive the MDC clock divider from the
 * AHB clock frequency.  Returns 0 on success or an errno value,
 * releasing everything acquired so far on failure.
 */
static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	hwreset_t rst_ahb, rst_ephy;
	clk_t clk_ahb, clk_ephy;
	regulator_t reg;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	rst_ahb = rst_ephy = NULL;
	clk_ahb = clk_ephy = NULL;
	reg = NULL;

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	/* The EPHY reset/clock only exist on SoCs with an internal PHY. */
	if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
		rst_ephy = NULL;
	error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
		clk_ephy = NULL;

	/*
	 * Configure PHY for MII or RGMII mode.  Capture the error so a
	 * failure here is reported to the caller (previously 'error'
	 * could still be 0 on this path, masking the failure).
	 */
	error = awg_setup_phy(dev);
	if (error != 0)
		goto fail;

	/* Enable clocks */
	error = clk_enable(clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}

	if (clk_ephy != NULL) {
		error = clk_enable(clk_ephy);
		if (error != 0) {
			device_printf(dev, "cannot enable ephy clock\n");
			goto fail;
		}
	}

	/* De-assert reset */
	error = hwreset_deassert(rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot de-assert ahb reset\n");
		goto fail;
	}
	if (rst_ephy != NULL) {
		error = hwreset_deassert(rst_ephy);
		if (error != 0) {
			device_printf(dev, "cannot de-assert ephy reset\n");
			goto fail;
		}
	}

	/* Enable PHY regulator if applicable */
	if (regulator_get_by_ofw_property(dev, 0, "phy-supply", &reg) == 0) {
		error = regulator_enable(reg);
		if (error != 0) {
			device_printf(dev, "cannot enable PHY regulator\n");
			goto fail;
		}
	}

	/* Determine MDC clock divide ratio based on AHB clock */
	error = clk_get_freq(clk_ahb, &freq);
	if (error != 0) {
		device_printf(dev, "cannot get AHB clock frequency\n");
		goto fail;
	}
	div = freq / MDIO_FREQ;
	if (div <= 16)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
	else if (div <= 32)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
	else if (div <= 64)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
	else if (div <= 128)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
	else {
		device_printf(dev, "cannot determine MDC clock divide ratio\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n",
		    (uintmax_t)freq, sc->mdc_div_ratio_m);

	return (0);

fail:
	if (reg != NULL)
		regulator_release(reg);
	if (clk_ephy != NULL)
		clk_release(clk_ephy);
	if (clk_ahb != NULL)
		clk_release(clk_ahb);
	if (rst_ephy != NULL)
		hwreset_release(rst_ephy);
	if (rst_ahb != NULL)
		hwreset_release(rst_ahb);
	return (error);
}
/*
 * Read the station address from the EMAC address registers; if the
 * hardware holds no valid address (all-ones), derive a locally
 * administered one from the SID root key, or as a last resort from
 * arc4random().
 */
static void
awg_get_eaddr(device_t dev, uint8_t *eaddr)
{
	struct awg_softc *sc;
	uint32_t maclo, machi, rnd;
	u_char rootkey[16];

	sc = device_get_softc(dev);

	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
	maclo = RD4(sc, EMAC_ADDR_LOW(0));

	if (maclo == 0xffffffff && machi == 0xffff) {
		/* MAC address in hardware is invalid, create one */
		if (aw_sid_get_rootkey(rootkey) == 0 &&
		    (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] |
		     rootkey[15]) != 0) {
			/* MAC address is derived from the root key in SID */
			maclo = (rootkey[13] << 24) | (rootkey[12] << 16) |
			    (rootkey[3] << 8) | 0x02;
			machi = (rootkey[15] << 8) | rootkey[14];
		} else {
			/* Create one */
			rnd = arc4random();
			maclo = 0x00f2 | (rnd & 0xffff0000);
			machi = rnd & 0xffff;
		}
	}

	/* Registers hold the address little-endian: low word first. */
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}
#ifdef AWG_DEBUG
/* Debug helper: print the contents of the main EMAC registers. */
static void
awg_dump_regs(device_t dev)
{
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{ "TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc;
	unsigned int n;

	sc = device_get_softc(dev);

	for (n = 0; n < nitems(regs); n++)
		device_printf(dev, "  %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif
#define	GPIO_ACTIVE_LOW		1

/*
 * Pulse the PHY reset GPIO described by the "allwinner,reset-gpio"
 * FDT property, using the three delays (pre-assert, assert,
 * post-deassert, in microseconds) from "allwinner,reset-delays-us".
 * Returns 0 if no reset GPIO is configured or on success, ENXIO on
 * malformed properties.
 */
static int
awg_phy_reset(device_t dev)
{
	pcell_t gpio_prop[4], delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
	    sizeof(gpio_prop)) <= 0)
		return (0);

	if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
	    sizeof(delay_prop)) <= 0)
		return (ENXIO);

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
		return (ENXIO);

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0)
		return (ENXIO);

	/* Resolve the logical "asserted" level of the reset line. */
	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "allwinner,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	if (flags & GPIO_ACTIVE_LOW)
		pin_value = !pin_value;

	/* Deassert, assert, deassert with the configured delays. */
	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2]);

	return (0);
}
/*
 * Reset the controller: optionally pulse the PHY reset GPIO, then
 * issue an EMAC soft reset and wait for the bit to self-clear.
 * Returns 0, ENXIO, or ETIMEDOUT.
 */
static int
awg_reset(device_t dev)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	/* Reset PHY if necessary */
	if (awg_phy_reset(dev) != 0) {
		device_printf(dev, "failed to reset PHY\n");
		return (ENXIO);
	}

	/* Soft reset all registers and logic */
	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);

	/* Wait for soft reset bit to self-clear */
	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
			break;
		DELAY(10);
	}

	if (retry == 0) {
		device_printf(dev, "soft reset timed out\n");
#ifdef AWG_DEBUG
		awg_dump_regs(dev);
#endif
		return (ETIMEDOUT);
	}

	return (0);
}
/*
 * bus_dmamap_load callback: on success, store the single segment's
 * bus address into the bus_addr_t pointed to by 'arg'.
 */
static void
awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0)
		*paddr = segs[0].ds_addr;
}
/*
 * Allocate and wire up the TX and RX descriptor rings and their
 * buffer DMA maps, pre-fill the RX ring with mbuf clusters, and
 * program the descriptor list base registers.  Returns 0 or an
 * errno value (partial allocations are torn down by detach).
 */
static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	/* Link the TX descriptors into a hardware-visible circular list. */
	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	sc->tx.queued = 0;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	/* Spare map used by awg_newbuf_rx() for safe buffer replacement. */
	error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map);
	if (error != 0) {
		device_printf(dev,
		    "cannot create RX buffer spare map\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		sc->rx.desc_ring[i].next =
		    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i)));

		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		sc->rx.buf_map[i].mbuf = NULL;
		error = awg_newbuf_rx(sc, i);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	return (0);
}
/* Newbus probe: match against the FDT compatible table. */
static int
awg_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Newbus attach: allocate bus resources, set up clocks/resets and
 * regulators, read the MAC address, reset the core, build the DMA
 * rings, install the interrupt handler, and attach the ifnet and
 * MII layers.  Returns 0 or an errno value.
 *
 * Note: the stray diff markers that left an unused 'node' variable
 * (and its ofw_bus_get_node() initialization) half-removed have been
 * cleaned up; the variable was never used here.
 */
static int
awg_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct awg_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, awg_link_task, sc);

	/* Setup clocks and regulators */
	error = awg_setup_extres(dev);
	if (error != 0)
		return (error);

	/* Read MAC address before resetting the chip */
	awg_get_eaddr(dev, eaddr);

	/* Soft reset EMAC core */
	error = awg_reset(dev);
	if (error != 0)
		return (error);

	/* Setup DMA descriptors */
	error = awg_setup_dma(dev);
	if (error != 0)
		return (error);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, awg_start);
	if_setioctlfn(sc->ifp, awg_ioctl);
	if_setinitfn(sc->ifp, awg_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
#endif

	/* Attach MII driver */
	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		return (error);
	}

	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr);

	return (0);
}
/* Newbus/miibus method table for the awg driver. */
static device_method_t awg_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, awg_probe),
DEVMETHOD(device_attach, awg_attach),
/* MII interface */
DEVMETHOD(miibus_readreg, awg_miibus_readreg),
DEVMETHOD(miibus_writereg, awg_miibus_writereg),
DEVMETHOD(miibus_statchg, awg_miibus_statchg),
DEVMETHOD_END
};
/* Driver declaration: name, methods, softc size. */
static driver_t awg_driver = {
"awg",
awg_methods,
sizeof(struct awg_softc),
};
static devclass_t awg_devclass;
/* Register under simplebus and attach a miibus child for the PHY. */
DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0);
DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(awg, ether, 1, 1, 1);
MODULE_DEPEND(awg, miibus, 1, 1, 1);
Index: head/sys/arm/arm/gic.c
===================================================================
--- head/sys/arm/arm/gic.c (revision 327172)
+++ head/sys/arm/arm/gic.c (revision 327173)
@@ -1,1599 +1,1597 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2011 The FreeBSD Foundation
* All rights reserved.
*
* Developed by Damjan Marion <damjan.marion@gmail.com>
*
* Based on OMAP4 GIC code by Ben Gray
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#ifdef INTRNG
#include <sys/sched.h>
#endif
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef FDT
#include <dev/fdt/fdt_intr.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <arm/arm/gic.h>
#include <arm/arm/gic_common.h>
#ifdef INTRNG
#include "pic_if.h"
#include "msi_if.h"
#endif
/* We are using GICv2 register naming */
/* Distributor Registers */
/* CPU Registers */
/* CPU interface register offsets (GICv1 aliases noted per line). */
#define GICC_CTLR 0x0000 /* v1 ICCICR */
#define GICC_PMR 0x0004 /* v1 ICCPMR */
#define GICC_BPR 0x0008 /* v1 ICCBPR */
#define GICC_IAR 0x000C /* v1 ICCIAR */
#define GICC_EOIR 0x0010 /* v1 ICCEOIR */
#define GICC_RPR 0x0014 /* v1 ICCRPR */
#define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
#define GICC_ABPR 0x001C /* v1 ICCABPR */
#define GICC_IIDR 0x00FC /* v1 ICCIIDR*/
/* TYPER Registers */
#define GICD_TYPER_SECURITYEXT 0x400
#define GIC_SUPPORT_SECEXT(_sc) \
((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
#ifndef GIC_DEFAULT_ICFGR_INIT
#define GIC_DEFAULT_ICFGR_INIT 0x00000000
#endif
#ifdef INTRNG
/* Per-interrupt state layered on the generic intr_irqsrc. */
struct gic_irqsrc {
struct intr_irqsrc gi_isrc;
uint32_t gi_irq;
enum intr_polarity gi_pol;
enum intr_trigger gi_trig;
#define GI_FLAG_EARLY_EOI (1 << 0)
#define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */
/* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */
/* for a MSI/MSI-X interrupt */
u_int gi_flags;
};
/* Round-robin cursor for distributing SPIs across CPUs. */
static u_int gic_irq_cpu;
static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
#ifdef SMP
/* SGI -> IPI number mapping, filled by arm_gic_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
#define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc)
#else /* !INTRNG */
static struct ofw_compat_data compat_data[] = {
{"arm,gic", true}, /* Non-standard, used in FreeBSD dts. */
{"arm,gic-400", true},
{"arm,cortex-a15-gic", true},
{"arm,cortex-a9-gic", true},
{"arm,cortex-a7-gic", true},
{"arm,arm11mp-gic", true},
{"brcm,brahma-b15-gic", true},
{"qcom,msm-qgic2", true},
{NULL, false}
};
#endif
static struct resource_spec arm_gic_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
#ifdef INTRNG
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
#endif
{ -1, 0 }
};
/* Spurious-interrupt diagnostics default on for arm+INVARIANTS builds. */
#if defined(__arm__) && defined(INVARIANTS)
static int gic_debug_spurious = 1;
#else
static int gic_debug_spurious = 0;
#endif
TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
/* Per-CPU GIC target mask, indexed by cpuid; used to address SGIs. */
static u_int arm_gic_map[MAXCPU];
static struct arm_gic_softc *gic_sc = NULL;
/* Accessors for the CPU-interface (gic_c_*) and distributor (gic_d_*). */
#define gic_c_read_4(_sc, _reg) \
bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
#define gic_c_write_4(_sc, _reg, _val) \
bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
#define gic_d_read_4(_sc, _reg) \
bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
#define gic_d_write_1(_sc, _reg, _val) \
bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
#define gic_d_write_4(_sc, _reg, _val) \
bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
#ifndef INTRNG
static int gic_config_irq(int irq, enum intr_trigger trig,
enum intr_polarity pol);
static void gic_post_filter(void *);
#endif
#ifdef INTRNG
/* Enable (unmask) 'irq' by writing its bit to the Set-Enable register. */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{
gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
/* Disable (mask) 'irq' by writing its bit to the Clear-Enable register. */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{
gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
#endif
/*
 * Determine the distributor's target mask for the current CPU interface.
 * The banked GICD_ITARGETSR{0..7} registers report the mask of the CPU
 * doing the read; the first non-zero value wins.  Falls back to 1
 * (CPU interface 0) if nothing is set.
 */
static uint8_t
gic_cpu_mask(struct arm_gic_softc *sc)
{
	uint32_t val;
	int reg;

	/* Scan ITARGETSR{0..7}; stop at the first register with a bit set. */
	val = 0;
	for (reg = 0; reg < 8 && val == 0; reg++)
		val = gic_d_read_4(sc, GICD_ITARGETSR(4 * reg));

	/* No mask found, assume we are on CPU interface 0. */
	if (val == 0)
		return (1);

	/* Fold the four byte lanes of the register into the low byte. */
	val |= val >> 16;
	val |= val >> 8;
	return (val);
}
#ifdef SMP
#ifdef INTRNG
/*
 * Bring up the GIC on a secondary (AP) CPU: record this CPU's target
 * mask, zero all priorities, group everything secure when the security
 * extension is present, enable the CPU interface and distributor, and
 * unmask any SGI/PPI sources already set up by the boot CPU.
 * (INTRNG variant.)
 */
static void
arm_gic_init_secondary(device_t dev)
{
struct arm_gic_softc *sc = device_get_softc(dev);
u_int irq, cpu;
/* Set the mask so we can find this CPU to send it IPIs */
cpu = PCPU_GET(cpuid);
arm_gic_map[cpu] = gic_cpu_mask(sc);
for (irq = 0; irq < sc->nirqs; irq += 4)
gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
/* Set all the interrupts to be in Group 0 (secure) */
for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
}
/* Enable CPU interface */
gic_c_write_4(sc, GICC_CTLR, 1);
/* Set priority mask register. */
gic_c_write_4(sc, GICC_PMR, 0xff);
/* Enable interrupt distribution */
gic_d_write_4(sc, GICD_CTLR, 0x01);
/* Unmask attached SGI interrupts. */
for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
gic_irq_unmask(sc, irq);
/* Unmask attached PPI interrupts. */
for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
gic_irq_unmask(sc, irq);
}
#else
/*
 * Bring up the GIC on a secondary (AP) CPU (pre-INTRNG variant).  Same
 * register sequence as the INTRNG version, but hard-enables the three
 * per-CPU timer PPIs (27 virtual, 29 secure, 30 non-secure) instead of
 * consulting interrupt sources.
 */
static void
arm_gic_init_secondary(device_t dev)
{
struct arm_gic_softc *sc = device_get_softc(dev);
int i;
/* Set the mask so we can find this CPU to send it IPIs */
arm_gic_map[PCPU_GET(cpuid)] = gic_cpu_mask(sc);
for (i = 0; i < sc->nirqs; i += 4)
gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
/* Set all the interrupts to be in Group 0 (secure) */
for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
gic_d_write_4(sc, GICD_IGROUPR(i), 0);
}
/* Enable CPU interface */
gic_c_write_4(sc, GICC_CTLR, 1);
/* Set priority mask register. */
gic_c_write_4(sc, GICC_PMR, 0xff);
/* Enable interrupt distribution */
gic_d_write_4(sc, GICD_CTLR, 0x01);
/*
 * Activate the timer interrupts: virtual, secure, and non-secure.
 */
gic_d_write_4(sc, GICD_ISENABLER(27), GICD_I_MASK(27));
gic_d_write_4(sc, GICD_ISENABLER(29), GICD_I_MASK(29));
gic_d_write_4(sc, GICD_ISENABLER(30), GICD_I_MASK(30));
}
#endif /* INTRNG */
#endif /* SMP */
#ifndef INTRNG
/*
 * Decode an FDT interrupt specifier whose parent is this GIC
 * (pre-INTRNG path).  Caches the GIC node handle and #interrupt-cells
 * in function-local statics on first use; returns ENXIO when 'iparent'
 * is not the cached GIC.  One-cell specifiers pass the number through
 * with CONFORM trigger/polarity; three-cell specifiers follow the
 * standard GIC binding (type, number, flags).
 * NOTE(review): the statics make this single-GIC only — intentional for
 * this legacy path, but verify before reuse.
 */
int
gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt,
int *trig, int *pol)
{
static u_int num_intr_cells;
static phandle_t self;
struct ofw_compat_data *ocd;
if (self == 0) {
for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) {
if (ofw_bus_node_is_compatible(iparent, ocd->ocd_str)) {
self = iparent;
break;
}
}
}
if (self != iparent)
return (ENXIO);
if (num_intr_cells == 0) {
if (OF_searchencprop(OF_node_from_xref(iparent),
"#interrupt-cells", &num_intr_cells,
sizeof(num_intr_cells)) == -1) {
num_intr_cells = 1;
}
}
if (num_intr_cells == 1) {
*interrupt = fdt32_to_cpu(intr[0]);
*trig = INTR_TRIGGER_CONFORM;
*pol = INTR_POLARITY_CONFORM;
} else {
/* Cell 0 selects the bank: 0 = SPI, anything else = PPI. */
if (fdt32_to_cpu(intr[0]) == 0)
*interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI;
else
*interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI;
/*
 * In intr[2], bits[3:0] are trigger type and level flags.
 * 1 = low-to-high edge triggered
 * 2 = high-to-low edge triggered
 * 4 = active high level-sensitive
 * 8 = active low level-sensitive
 * The hardware only supports active-high-level or rising-edge
 * for SPIs
 */
if (*interrupt >= GIC_FIRST_SPI &&
fdt32_to_cpu(intr[2]) & 0x0a) {
printf("unsupported trigger/polarity configuration "
"0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f);
}
*pol = INTR_POLARITY_CONFORM;
if (fdt32_to_cpu(intr[2]) & 0x03)
*trig = INTR_TRIGGER_EDGE;
else
*trig = INTR_TRIGGER_LEVEL;
}
return (0);
}
#endif
#ifdef INTRNG
/*
 * Allocate and register 'num' interrupt sources with the INTRNG core,
 * named "<dev>,iN" for SGIs, "<dev>,pN" for PPIs and "<dev>,sN" for
 * SPIs.  On success stores the array and count in the softc and
 * returns 0; on failure frees the array and returns the error.
 * NOTE(review): sources registered before a failure are not
 * deregistered (see the XXX below) — they would leak on error.
 */
static int
arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
{
int error;
uint32_t irq;
struct gic_irqsrc *irqs;
struct intr_irqsrc *isrc;
const char *name;
irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
M_WAITOK | M_ZERO);
name = device_get_nameunit(sc->gic_dev);
for (irq = 0; irq < num; irq++) {
irqs[irq].gi_irq = irq;
irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
isrc = &irqs[irq].gi_isrc;
if (irq <= GIC_LAST_SGI) {
error = intr_isrc_register(isrc, sc->gic_dev,
INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
} else if (irq <= GIC_LAST_PPI) {
error = intr_isrc_register(isrc, sc->gic_dev,
INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
} else {
error = intr_isrc_register(isrc, sc->gic_dev, 0,
"%s,s%u", name, irq - GIC_FIRST_SPI);
}
if (error != 0) {
/* XXX call intr_isrc_deregister() */
free(irqs, M_DEVBUF);
return (error);
}
}
sc->gic_irqs = irqs;
sc->nirqs = num;
return (0);
}
/*
 * Reserve interrupts [start, start + count) for MSI/MSI-X use: force
 * the edge-triggered/active-high configuration MSIs require and tag
 * each source with GI_FLAG_MSI so it is never handed out to FDT
 * mappings.  Asserts (INVARIANTS) that the range is in bounds and
 * untouched.
 *
 * Fix: the three per-interrupt KASSERT messages printed "count + i";
 * the interrupt actually being checked is "start + i".
 */
static void
arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
{
	struct arm_gic_softc *sc;
	int i;

	sc = device_get_softc(dev);

	KASSERT((start + count) < sc->nirqs,
	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
	    start, count, sc->nirqs));
	for (i = 0; i < count; i++) {
		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
		    ("%s: MSI interrupt %d already has a handler", __func__,
		    start + i));
		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
		    ("%s: MSI interrupt %d already has a polarity", __func__,
		    start + i));
		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
		    ("%s: MSI interrupt %d already has a trigger", __func__,
		    start + i));
		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
	}
}
#endif
/*
 * Shared attach body for the GIC (called from bus-specific front ends).
 * Maps the distributor and CPU-interface register windows, sizes the
 * interrupt space from GICD_TYPER, registers INTRNG sources (or legacy
 * hooks), then performs the cold-boot distributor/CPU-interface init.
 * Only one GIC instance is supported (gic_sc singleton).
 */
int
arm_gic_attach(device_t dev)
{
struct arm_gic_softc *sc;
int i;
uint32_t icciidr, mask, nirqs;
if (gic_sc)
return (ENXIO);
sc = device_get_softc(dev);
if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
sc->gic_dev = dev;
gic_sc = sc;
/* Initialize mutex */
mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN);
/* Distributor Interface */
sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
/* CPU Interface */
sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
/* Disable interrupt forwarding to the CPU interface */
gic_d_write_4(sc, GICD_CTLR, 0x00);
/* Get the number of interrupts */
sc->typer = gic_d_read_4(sc, GICD_TYPER);
nirqs = GICD_TYPER_I_NUM(sc->typer);
#ifdef INTRNG
if (arm_gic_register_isrcs(sc, nirqs)) {
device_printf(dev, "could not register irqs\n");
goto cleanup;
}
#else
sc->nirqs = nirqs;
/* Set up function pointers */
arm_post_filter = gic_post_filter;
arm_config_irq = gic_config_irq;
#endif
icciidr = gic_c_read_4(sc, GICC_IIDR);
device_printf(dev,
"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
#ifdef INTRNG
sc->gic_iidr = icciidr;
#endif
/* Set all global interrupts to be level triggered, active low. */
for (i = 32; i < sc->nirqs; i += 16) {
gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
}
/* Disable all interrupts. */
for (i = 32; i < sc->nirqs; i += 32) {
gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
}
/* Find the current cpu mask */
mask = gic_cpu_mask(sc);
/* Set the mask so we can find this CPU to send it IPIs */
arm_gic_map[PCPU_GET(cpuid)] = mask;
/* Set all four targets to this cpu */
mask |= mask << 8;
mask |= mask << 16;
/*
 * NOTE(review): "i > 32" skips GICD_ITARGETSR(32), i.e. SPIs 32-35
 * never get their target set here ("i >= 32" would cover them, while
 * registers below 32 are banked/read-only anyway) — confirm intent.
 */
for (i = 0; i < sc->nirqs; i += 4) {
gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
if (i > 32) {
gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
}
}
/* Set all the interrupts to be in Group 0 (secure) */
for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
gic_d_write_4(sc, GICD_IGROUPR(i), 0);
}
/* Enable CPU interface */
gic_c_write_4(sc, GICC_CTLR, 1);
/* Set priority mask register. */
gic_c_write_4(sc, GICC_PMR, 0xff);
/* Enable interrupt distribution */
gic_d_write_4(sc, GICD_CTLR, 0x01);
return (0);
#ifdef INTRNG
cleanup:
arm_gic_detach(dev);
return(ENXIO);
#endif
}
/*
 * Detach/cleanup: release the interrupt-source array and bus resources
 * (INTRNG only; the legacy build has nothing to undo).  Always returns
 * 0.  The NULL check before free() was redundant — free(9) is a no-op
 * on NULL — and the pointer is now cleared to guard a repeated call.
 */
int
arm_gic_detach(device_t dev)
{
#ifdef INTRNG
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	free(sc->gic_irqs, M_DEVBUF);	/* free(9) accepts NULL */
	sc->gic_irqs = NULL;
	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
#endif

	return (0);
}
#ifdef INTRNG
/*
 * Bus method: print a child device's header, its memory and IRQ
 * resources (when it has a resource list), and the footer.  Returns the
 * total number of characters printed.
 */
static int
arm_gic_print_child(device_t bus, device_t child)
{
	struct resource_list *resources;
	int retval;

	retval = bus_print_child_header(bus, child);

	resources = BUS_GET_RESOURCE_LIST(bus, child);
	if (resources == NULL)
		return (retval + bus_print_child_footer(bus, child));

	retval += resource_list_print_type(resources, "mem", SYS_RES_MEMORY,
	    "%#jx");
	retval += resource_list_print_type(resources, "irq", SYS_RES_IRQ,
	    "%jd");
	retval += bus_print_child_footer(bus, child);
	return (retval);
}
/*
 * Bus method: allocate a memory resource for a child (e.g. a GICv2m
 * frame).  Default-range requests are filled from the child's resource
 * list, then the address is translated through the GIC node's 'ranges'
 * before delegating to bus_generic_alloc_resource().
 * NOTE(review): the SYS_RES_IOPORT normalization below is unreachable
 * under INVARIANTS (the KASSERT restricts type to SYS_RES_MEMORY) —
 * presumably kept for non-INVARIANTS builds; confirm.
 */
static struct resource *
arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct arm_gic_softc *sc;
struct resource_list_entry *rle;
struct resource_list *rl;
int j;
KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
sc = device_get_softc(bus);
/*
 * Request for the default allocation with a given rid: use resource
 * list stored in the local device info.
 */
if (RMAN_IS_DEFAULT_RANGE(start, end)) {
rl = BUS_GET_RESOURCE_LIST(bus, child);
if (type == SYS_RES_IOPORT)
type = SYS_RES_MEMORY;
rle = resource_list_find(rl, type, *rid);
if (rle == NULL) {
if (bootverbose)
device_printf(bus, "no default resources for "
"rid = %d, type = %d\n", *rid, type);
return (NULL);
}
start = rle->start;
end = rle->end;
count = rle->count;
}
/* Remap through ranges property */
for (j = 0; j < sc->nranges; j++) {
if (start >= sc->ranges[j].bus && end <
sc->ranges[j].bus + sc->ranges[j].size) {
start -= sc->ranges[j].bus;
start += sc->ranges[j].host;
end -= sc->ranges[j].bus;
end += sc->ranges[j].host;
break;
}
}
if (j == sc->nranges && sc->nranges != 0) {
if (bootverbose)
device_printf(bus, "Could not map resource "
"%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
return (NULL);
}
return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
count, flags));
}
/*
 * Bus method: report instance variables to children — the GIC hardware
 * revision (from GICD_IIDR) and the bus the GIC attached on.  Returns
 * ENOENT for unknown ivars.
 */
static int
arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct arm_gic_softc *sc = device_get_softc(dev);

	if (which == GIC_IVAR_HW_REV) {
		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3 &&
		    GICD_IIDR_VAR(sc->gic_iidr) != 0,
		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
		    GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
		*result = GICD_IIDR_VAR(sc->gic_iidr);
		return (0);
	}
	if (which == GIC_IVAR_BUS) {
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("arm_gic_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}
	return (ENOENT);
}
/*
 * Top-level GIC interrupt filter: loop acknowledging interrupts from
 * GICC_IAR and dispatching them until only the spurious ID remains.
 * SGIs are EOIed immediately (the EOI needs the source-CPU bits from
 * the IAR value); other sources EOI early or late depending on
 * GI_FLAG_EARLY_EOI.  See the numbered comment below for the EOI and
 * preemption constraints.
 */
int
arm_gic_intr(void *arg)
{
struct arm_gic_softc *sc = arg;
struct gic_irqsrc *gi;
uint32_t irq_active_reg, irq;
struct trapframe *tf;
irq_active_reg = gic_c_read_4(sc, GICC_IAR);
irq = irq_active_reg & 0x3FF;
/*
 * 1. We do EOI here because recent read value from active interrupt
 * register must be used for it. Another approach is to save this
 * value into associated interrupt source.
 * 2. EOI must be done on same CPU where interrupt has fired. Thus
 * we must ensure that interrupted thread does not migrate to
 * another CPU.
 * 3. EOI cannot be delayed by any preemption which could happen on
 * critical_exit() used in MI intr code, when interrupt thread is
 * scheduled. See next point.
 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
 * an action and any use of critical_exit() could break this
 * assumption. See comments within smp_rendezvous_action().
 * 5. We always return FILTER_HANDLED as this is an interrupt
 * controller dispatch function. Otherwise, in cascaded interrupt
 * case, the whole interrupt subtree would be masked.
 */
if (irq >= sc->nirqs) {
if (gic_debug_spurious)
device_printf(sc->gic_dev,
"Spurious interrupt detected: last irq: %d on CPU%d\n",
sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
return (FILTER_HANDLED);
}
tf = curthread->td_intr_frame;
dispatch_irq:
gi = sc->gic_irqs + irq;
/*
 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
 * as compiler complains that comparing u_int >= 0 is always true.
 */
if (irq <= GIC_LAST_SGI) {
#ifdef SMP
/* Call EOI for all IPI before dispatch. */
gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
goto next_irq;
#else
device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
irq - GIC_FIRST_SGI);
gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
goto next_irq;
#endif
}
if (gic_debug_spurious)
sc->last_irq[PCPU_GET(cpuid)] = irq;
if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
/* No handler claimed it: mask the stray source and EOI late. */
gic_irq_mask(sc, irq);
if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
}
next_irq:
arm_irq_memory_barrier(irq);
irq_active_reg = gic_c_read_4(sc, GICC_IAR);
irq = irq_active_reg & 0x3FF;
if (irq < sc->nirqs)
goto dispatch_irq;
return (FILTER_HANDLED);
}
/*
 * Program trigger mode and polarity for 'irq' in GICD_ICFGR (a 2-bit
 * field per interrupt, 16 interrupts per register).  SGIs/PPIs
 * (irq < GIC_FIRST_SPI) are fixed by hardware and skipped.  CONFORM
 * values leave the corresponding bits unchanged.  Serialized by the
 * softc spin mutex (read-modify-write of a shared register).
 */
static void
gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
enum intr_polarity pol)
{
uint32_t reg;
uint32_t mask;
if (irq < GIC_FIRST_SPI)
return;
mtx_lock_spin(&sc->mutex);
reg = gic_d_read_4(sc, GICD_ICFGR(irq));
mask = (reg >> 2*(irq % 16)) & 0x3;
if (pol == INTR_POLARITY_LOW) {
mask &= ~GICD_ICFGR_POL_MASK;
mask |= GICD_ICFGR_POL_LOW;
} else if (pol == INTR_POLARITY_HIGH) {
mask &= ~GICD_ICFGR_POL_MASK;
mask |= GICD_ICFGR_POL_HIGH;
}
if (trig == INTR_TRIGGER_LEVEL) {
mask &= ~GICD_ICFGR_TRIG_MASK;
mask |= GICD_ICFGR_TRIG_LVL;
} else if (trig == INTR_TRIGGER_EDGE) {
mask &= ~GICD_ICFGR_TRIG_MASK;
mask |= GICD_ICFGR_TRIG_EDGE;
}
/* Set mask */
reg = reg & ~(0x3 << 2*(irq % 16));
reg = reg | (mask << 2*(irq % 16));
gic_d_write_4(sc, GICD_ICFGR(irq), reg);
mtx_unlock_spin(&sc->mutex);
}
/*
 * Route 'irq' to the CPUs named in 'cpus' by writing the byte-wide
 * GICD_ITARGETSR entry.  The GIC addresses at most 8 CPU interfaces;
 * a set containing any CPU beyond that (or beyond mp_ncpus) yields
 * EINVAL.
 */
static int
gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
{
	uint32_t limit, mask, n;

	limit = min(mp_ncpus, 8);

	/* Reject bindings naming a CPU the distributor cannot target. */
	for (n = limit; n < MAXCPU; n++) {
		if (CPU_ISSET(n, cpus))
			return (EINVAL);
	}

	/* Accumulate the CPU-interface mask for the requested CPUs. */
	mask = 0;
	for (n = 0; n < limit; n++) {
		if (CPU_ISSET(n, cpus))
			mask |= arm_gic_map[n];
	}

	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
	return (0);
}
#ifdef FDT
/*
 * Translate an FDT interrupt specifier (1 or 3 cells, per the GIC
 * binding) into a linear GIC interrupt number plus polarity/trigger.
 * Returns 0 on success, EINVAL on malformed specifiers; unsupported
 * flag combinations are reported but not rejected.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
enum intr_polarity *polp, enum intr_trigger *trigp)
{
if (ncells == 1) {
*irqp = cells[0];
*polp = INTR_POLARITY_CONFORM;
*trigp = INTR_TRIGGER_CONFORM;
return (0);
}
if (ncells == 3) {
u_int irq, tripol;
/*
 * The 1st cell is the interrupt type:
 * 0 = SPI
 * 1 = PPI
 * The 2nd cell contains the interrupt number:
 * [0 - 987] for SPI
 * [0 - 15] for PPI
 * The 3rd cell is the flags, encoded as follows:
 * bits[3:0] trigger type and level flags
 * 1 = low-to-high edge triggered
 * 2 = high-to-low edge triggered
 * 4 = active high level-sensitive
 * 8 = active low level-sensitive
 * bits[15:8] PPI interrupt cpu mask
 * Each bit corresponds to each of the 8 possible cpus
 * attached to the GIC. A bit set to '1' indicated
 * the interrupt is wired to that CPU.
 */
switch (cells[0]) {
case 0:
irq = GIC_FIRST_SPI + cells[1];
/* SPI irq is checked later. */
break;
case 1:
irq = GIC_FIRST_PPI + cells[1];
if (irq > GIC_LAST_PPI) {
device_printf(dev, "unsupported PPI interrupt "
"number %u\n", cells[1]);
return (EINVAL);
}
break;
default:
device_printf(dev, "unsupported interrupt type "
"configuration %u\n", cells[0]);
return (EINVAL);
}
tripol = cells[2] & 0xff;
/* SPIs only support rising-edge or active-high-level. */
if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
cells[0] == 0))
device_printf(dev, "unsupported trigger/polarity "
"configuration 0x%02x\n", tripol);
*irqp = irq;
*polp = INTR_POLARITY_CONFORM;
*trigp = tripol & FDT_INTR_EDGE_MASK ?
INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
return (0);
}
return (EINVAL);
}
#endif
/*
 * Resolve a plain (non-GICv2m) MSI mapping: recover the interrupt
 * number from the source stashed in the map data and report the fixed
 * MSI configuration.  Returns ENXIO when no source was allocated.
 */
static int
gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_irqsrc *src;

	/* Map a non-GICv2m MSI */
	src = (struct gic_irqsrc *)msi_data->isrc;
	if (src == NULL)
		return (ENXIO);

	*irqp = src->gi_irq;

	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
	*polp = INTR_POLARITY_HIGH;
	*trigp = INTR_TRIGGER_EDGE;

	return (0);
}
/*
 * Common mapping entry: decode FDT or MSI map data into an interrupt
 * number, polarity and trigger, then validate all three against the
 * softc.  polp/trigp may be NULL when the caller only needs the number.
 * Returns EINVAL on bad data, ENOTSUP on unknown map-data types.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
enum intr_polarity *polp, enum intr_trigger *trigp)
{
u_int irq;
enum intr_polarity pol;
enum intr_trigger trig;
struct arm_gic_softc *sc;
struct intr_map_data_msi *dam;
#ifdef FDT
struct intr_map_data_fdt *daf;
#endif
sc = device_get_softc(dev);
switch (data->type) {
#ifdef FDT
case INTR_MAP_DATA_FDT:
daf = (struct intr_map_data_fdt *)data;
if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
&trig) != 0)
return (EINVAL);
/* FDT must never hand out an interrupt reserved for MSI. */
KASSERT(irq >= sc->nirqs ||
(sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
("%s: Attempting to map a MSI interrupt from FDT",
__func__));
break;
#endif
case INTR_MAP_DATA_MSI:
/* Non-GICv2m MSI */
dam = (struct intr_map_data_msi *)data;
if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
return (EINVAL);
break;
default:
return (ENOTSUP);
}
if (irq >= sc->nirqs)
return (EINVAL);
if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
pol != INTR_POLARITY_HIGH)
return (EINVAL);
if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
trig != INTR_TRIGGER_LEVEL)
return (EINVAL);
*irqp = irq;
if (polp != NULL)
*polp = pol;
if (trigp != NULL)
*trigp = trig;
return (0);
}
/*
 * PIC method: map bus-specific interrupt data to the matching
 * interrupt source, delegating decoding/validation to gic_map_intr().
 */
static int
arm_gic_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct arm_gic_softc *sc;
	u_int irqnum;
	int rv;

	rv = gic_map_intr(dev, data, &irqnum, NULL, NULL);
	if (rv != 0)
		return (rv);

	sc = device_get_softc(dev);
	*isrcp = GIC_INTR_ISRC(sc, irqnum);
	return (0);
}
/*
 * PIC method: configure an interrupt source when a handler is attached.
 * MSI sources keep their fixed edge/high setup; otherwise the config is
 * taken from the map data (and must match gi_irq).  A second setup on
 * the same source only verifies the configuration is compatible.  On
 * first setup the hardware is programmed and the SPI is bound to a CPU;
 * edge-triggered sources are flagged for early EOI in arm_gic_intr().
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
struct resource *res, struct intr_map_data *data)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
enum intr_trigger trig;
enum intr_polarity pol;
if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
/* GICv2m MSI */
pol = gi->gi_pol;
trig = gi->gi_trig;
KASSERT(pol == INTR_POLARITY_HIGH,
("%s: MSI interrupts must be active-high", __func__));
KASSERT(trig == INTR_TRIGGER_EDGE,
("%s: MSI interrupts must be edge triggered", __func__));
} else if (data != NULL) {
u_int irq;
/* Get config for resource. */
if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
gi->gi_irq != irq)
return (EINVAL);
} else {
pol = INTR_POLARITY_CONFORM;
trig = INTR_TRIGGER_CONFORM;
}
/* Compare config if this is not first setup. */
if (isrc->isrc_handlers != 0) {
if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
(trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
return (EINVAL);
else
return (0);
}
/* For MSI/MSI-X we should have already configured these */
if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
if (pol == INTR_POLARITY_CONFORM)
pol = INTR_POLARITY_LOW; /* just pick some */
if (trig == INTR_TRIGGER_CONFORM)
trig = INTR_TRIGGER_EDGE; /* just pick some */
gi->gi_pol = pol;
gi->gi_trig = trig;
/* Edge triggered interrupts need an early EOI sent */
if (gi->gi_trig == INTR_TRIGGER_EDGE)
gi->gi_flags |= GI_FLAG_EARLY_EOI;
}
/*
 * XXX - In case that per CPU interrupt is going to be enabled in time
 * when SMP is already started, we need some IPI call which
 * enables it on others CPUs. Further, it's more complicated as
 * pic_enable_source() and pic_disable_source() should act on
 * per CPU basis only. Thus, it should be solved here somehow.
 */
if (isrc->isrc_flags & INTR_ISRCF_PPI)
CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
arm_gic_bind_intr(dev, isrc);
return (0);
}
/*
 * PIC method: when the last handler is removed from a non-MSI source,
 * reset its polarity/trigger to CONFORM so the next setup may pick a
 * fresh configuration.  MSI sources keep their fixed configuration.
 */
static int
arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_irqsrc *src = (struct gic_irqsrc *)isrc;

	if (isrc->isrc_handlers != 0 || (src->gi_flags & GI_FLAG_MSI) != 0)
		return (0);

	src->gi_pol = INTR_POLARITY_CONFORM;
	src->gi_trig = INTR_TRIGGER_CONFORM;
	return (0);
}
/*
 * PIC method: unmask an interrupt source.  The barrier call precedes
 * the unmask — presumably to make prior stores visible before the
 * interrupt can fire (see arm_irq_memory_barrier()).
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
arm_irq_memory_barrier(gi->gi_irq);
gic_irq_unmask(sc, gi->gi_irq);
}
/* PIC method: mask an interrupt source in the distributor. */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
gic_irq_mask(sc, gi->gi_irq);
}
/*
 * PIC method: before handing off to an interrupt thread, mask the
 * source and EOI it so the CPU interface can deliver other interrupts.
 * NOTE(review): writes gi_irq (not the full IAR value) to GICC_EOIR —
 * only reaches here for non-SGI sources, where the extra IAR bits are
 * zero; confirm.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
arm_gic_disable_intr(dev, isrc);
gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
/*
 * PIC method: after the interrupt thread has run, issue a barrier and
 * re-enable the source masked by arm_gic_pre_ithread().
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
arm_irq_memory_barrier(0);
arm_gic_enable_intr(dev, isrc);
}
/*
 * PIC method: EOI after a filter handler, unless the source was
 * flagged GI_FLAG_EARLY_EOI (edge-triggered), in which case
 * arm_gic_intr() already acknowledged it.
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
/* EOI for edge-triggered done earlier. */
if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
return;
arm_irq_memory_barrier(0);
gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
/*
 * PIC method: bind an SPI to the CPUs in isrc_cpu.  SGIs/PPIs are
 * per-CPU and cannot be re-routed (EINVAL).  An empty set picks the
 * next CPU round-robin via the gic_irq_cpu cursor.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
if (gi->gi_irq < GIC_FIRST_SPI)
return (EINVAL);
if (CPU_EMPTY(&isrc->isrc_cpu)) {
gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
}
return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
#ifdef SMP
/*
 * PIC method (INTRNG): send the SGI behind 'isrc' to every CPU in
 * 'cpus' by assembling the target-list mask for GICD_SGIR.
 */
static void
arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
u_int ipi)
{
struct arm_gic_softc *sc = device_get_softc(dev);
struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
uint32_t val = 0, i;
for (i = 0; i < MAXCPU; i++)
if (CPU_ISSET(i, &cpus))
val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
}
/*
 * PIC method: allocate the next free SGI for IPI number 'ipi',
 * recording the SGI->IPI mapping used by arm_gic_intr().  Returns
 * ENOSPC once all 16 SGIs are taken.  Not re-entrant: uses the static
 * sgi_first_unused cursor (called during single-threaded startup).
 */
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
struct intr_irqsrc *isrc;
struct arm_gic_softc *sc = device_get_softc(dev);
if (sgi_first_unused > GIC_LAST_SGI)
return (ENOSPC);
isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
sgi_to_ipi[sgi_first_unused++] = ipi;
CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
*isrcp = isrc;
return (0);
}
#endif
#else
/*
 * Pre-INTRNG acknowledge path: read GICC_IAR and return the active IRQ
 * number, or -1 when only the spurious ID (0x3FF) is pending.  SGIs
 * are EOIed immediately because the EOI must carry the source-CPU bits
 * from the IAR value, which are not kept afterwards.
 *
 * Fix: parenthesize the returns — every other return in this file uses
 * the "return (x);" style(9) form.
 */
static int
arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
{
	uint32_t active_irq;

	active_irq = gic_c_read_4(sc, GICC_IAR);

	/*
	 * Immediately EOIR the SGIs, because doing so requires the other
	 * bits (ie CPU number), not just the IRQ number, and we do not
	 * have this information later.
	 */
	if ((active_irq & 0x3ff) <= GIC_LAST_SGI)
		gic_c_write_4(sc, GICC_EOIR, active_irq);
	active_irq &= 0x3FF;

	if (active_irq == 0x3FF) {
		if (last_irq == -1)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected\n");
		return (-1);
	}

	return (active_irq);
}
/*
 * Pre-INTRNG public configuration hook: set trigger mode and polarity
 * for 'irq' in GICD_ICFGR (2-bit field per interrupt, 16 per register),
 * under the softc spin mutex.  CONFORM values leave the corresponding
 * bits untouched.  Returns 0 on success, EINVAL for an out-of-range irq
 * or unknown trigger/polarity value.
 *
 * Fix: diagnostic string said "gic_config_irg"; the function it refers
 * to is gic_config_irq.
 */
static int
arm_gic_config(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	uint32_t reg;
	uint32_t mask;

	/* Function is public-accessible, so validate input arguments */
	if ((irq < 0) || (irq >= sc->nirqs))
		goto invalid_args;
	if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) &&
	    (trig != INTR_TRIGGER_CONFORM))
		goto invalid_args;
	if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) &&
	    (pol != INTR_POLARITY_CONFORM))
		goto invalid_args;

	mtx_lock_spin(&sc->mutex);

	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
	mask = (reg >> 2*(irq % 16)) & 0x3;

	if (pol == INTR_POLARITY_LOW) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_LOW;
	} else if (pol == INTR_POLARITY_HIGH) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_HIGH;
	}

	if (trig == INTR_TRIGGER_LEVEL) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_LVL;
	} else if (trig == INTR_TRIGGER_EDGE) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_EDGE;
	}

	/* Set mask */
	reg = reg & ~(0x3 << 2*(irq % 16));
	reg = reg | (mask << 2*(irq % 16));
	gic_d_write_4(sc, GICD_ICFGR(irq), reg);

	mtx_unlock_spin(&sc->mutex);

	return (0);

invalid_args:
	device_printf(dev, "gic_config_irq, invalid parameters\n");
	return (EINVAL);
}
/*
 * Mask (disable) 'irq' in the distributor and complete it on the CPU
 * interface.
 */
static void
arm_gic_mask(device_t dev, int irq)
{
	struct arm_gic_softc *sc = device_get_softc(dev);

	/* GICD_ICENABLER: one clear-enable bit per IRQ, 32 per register. */
	gic_d_write_4(sc, GICD_ICENABLER(irq), (1UL << (irq & 0x1F)));
	/*
	 * NOTE(review): writing EOIR here for an interrupt this CPU may
	 * not have acknowledged is dubious, hence the original XXX.
	 */
	gic_c_write_4(sc, GICC_EOIR, irq);	/* XXX - not allowed */
}
/*
 * Unmask (enable) 'irq' in the distributor.
 */
static void
arm_gic_unmask(device_t dev, int irq)
{
	struct arm_gic_softc *sc = device_get_softc(dev);

	/* Ensure prior memory writes are visible before re-enabling. */
	if (irq > GIC_LAST_SGI)
		arm_irq_memory_barrier(irq);

	/* GICD_ISENABLER: one set-enable bit per IRQ, 32 per register. */
	gic_d_write_4(sc, GICD_ISENABLER(irq), (1UL << (irq & 0x1F)));
}
#ifdef SMP
/*
 * Raise software-generated interrupt 'ipi' on every CPU in 'cpus'.
 */
static void
arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	uint32_t cpu, sgir;

	/* Build the GICD_SGIR target mask from the logical CPU set. */
	sgir = 0;
	for (cpu = 0; cpu < MAXCPU; cpu++) {
		if (CPU_ISSET(cpu, &cpus))
			sgir |= arm_gic_map[cpu] << GICD_SGI_TARGET_SHIFT;
	}

	gic_d_write_4(sc, GICD_SGIR, sgir | ipi);
}
/*
 * Translate an acknowledged interrupt number into an IPI number.
 * Returns 0x3ff (the spurious vector) for -1, 0 for values outside the
 * SGI range, and the number itself otherwise.
 */
static int
arm_gic_ipi_read(device_t dev, int i)
{

	if (i == -1)
		return (0x3ff);

	/*
	 * The intr code will automagically give the frame pointer
	 * if the interrupt argument is 0.
	 */
	if ((unsigned int)i > 16)
		return (0);

	return (i);
}
/*
 * Nothing to do here: SGIs are completed by the EOIR write performed
 * when the interrupt is acknowledged (see arm_gic_next_irq()).
 */
static void
arm_gic_ipi_clear(device_t dev, int ipi)
{
	/* no-op */
}
#endif
/*
 * Post-filter hook: issue the end-of-interrupt for the IRQ number
 * passed in 'arg' after its filter handler has run.
 */
static void
gic_post_filter(void *arg)
{
	struct arm_gic_softc *sc = gic_sc;
	uintptr_t irq = (uintptr_t) arg;

	/* Drain device writes before completing a peripheral interrupt. */
	if (irq > GIC_LAST_SGI)
		arm_irq_memory_barrier(irq);
	gic_c_write_4(sc, GICC_EOIR, irq);
}
/* Legacy entry point: configure 'irq' via the singleton GIC softc. */
static int
gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol)
{

	return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol));
}
/* Legacy (non-INTRNG) entry point: mask interrupt 'nb'. */
void
arm_mask_irq(uintptr_t nb)
{

	arm_gic_mask(gic_sc->gic_dev, nb);
}
/* Legacy (non-INTRNG) entry point: unmask interrupt 'nb'. */
void
arm_unmask_irq(uintptr_t nb)
{

	arm_gic_unmask(gic_sc->gic_dev, nb);
}
/* Legacy (non-INTRNG) entry point: fetch the next pending interrupt. */
int
arm_get_next_irq(int last_irq)
{

	return (arm_gic_next_irq(gic_sc, last_irq));
}
#ifdef SMP
/* Initialize the GIC CPU interface on a secondary (AP) processor. */
void
intr_pic_init_secondary(void)
{

	arm_gic_init_secondary(gic_sc->gic_dev);
}
/* Legacy SMP entry point: send IPI 'ipi' to the CPUs in 'cpus'. */
void
pic_ipi_send(cpuset_t cpus, u_int ipi)
{

	arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi);
}
/* Legacy SMP entry point: decode an acknowledged IPI number. */
int
pic_ipi_read(int i)
{

	return (arm_gic_ipi_read(gic_sc->gic_dev, i));
}
/* Legacy SMP entry point: clear a pending IPI (no-op on the GIC). */
void
pic_ipi_clear(int ipi)
{

	arm_gic_ipi_clear(gic_sc->gic_dev, ipi);
}
#endif
#endif /* INTRNG */
/* Newbus/INTRNG method table for the GIC driver. */
static device_method_t arm_gic_methods[] = {
#ifdef INTRNG
	/* Bus interface */
	DEVMETHOD(bus_print_child,	arm_gic_print_child),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
#ifdef SMP
	/* SMP-only PIC methods */
	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
#endif
#endif
	{ 0, 0 }
};
/* Base driver class for the GIC; bus front-ends subclass this. */
DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
#ifdef INTRNG
/*
* GICv2m support -- the GICv2 MSI/MSI-X controller.
*/
/* GICv2m frame register offsets and field extractors. */
#define	GICV2M_MSI_TYPER	0x008	/* MSI type: SPI base and count */
#define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
#define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
#define	GICv2M_MSI_SETSPI_NS	0x040	/* MSI doorbell written by devices */
#define	GICV2M_MSI_IIDR	0xFCC	/* implementer identification */
/*
 * Attach the GICv2m MSI/MSI-X frame: map its register window, read the
 * SPI range it can signal from GICV2M_MSI_TYPER, reserve that range in
 * the parent GIC for MSI use, and register as an MSI controller.
 *
 * Fix: dropped the two leftover diff-removal lines ("- struct
 * arm_gic_softc *psc;" / "- psc = ...") which are not valid C; 'psc'
 * was unused here after the range reservation moved into
 * arm_gic_reserve_msi_range().
 */
int
arm_gicv2m_attach(device_t dev)
{
	struct arm_gicv2m_softc *sc;
	uint32_t typer;
	int rid;

	sc = device_get_softc(dev);

	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "Unable to allocate resources\n");
		return (ENXIO);
	}

	/* The TYPER register describes which SPIs this frame can raise. */
	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
	sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;

	/* Reserve these interrupts for MSI/MSI-X use */
	arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
	    sc->sc_spi_count);

	mtx_init(&sc->sc_mutex, "GICv2m lock", "", MTX_DEF);

	intr_msi_register(dev, sc->sc_xref);

	if (bootverbose)
		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
		    sc->sc_spi_start + sc->sc_spi_count - 1);

	return (0);
}
/*
 * MSI_ALLOC_MSI method: find 'count' contiguous free SPIs inside the
 * GICv2m range, starting on a 'maxcount'-aligned boundary, mark them
 * used and hand back their irqsrcs.  The parent GIC is returned as the
 * PIC that will actually handle the interrupts.  Returns ENXIO when no
 * suitable run exists.
 */
static int
arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct arm_gic_softc *psc;
	struct arm_gicv2m_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	/* The interrupt state lives in the parent GIC's softc. */
	psc = device_get_softc(device_get_parent(dev));
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	found = false;
	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == sc->sc_spi_end) {
				found = false;
				break;
			}

			KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == sc->sc_spi_end) {
		mtx_unlock(&sc->sc_mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->sc_mutex);

	for (i = 0; i < count; i++)
		srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
	*pic = device_get_parent(dev);

	return (0);
}
/*
 * MSI_RELEASE_MSI method: return a block of MSI interrupts handed out
 * by arm_gicv2m_alloc_msi() to the free pool.
 */
static int
arm_gicv2m_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct arm_gicv2m_softc *sc;
	struct gic_irqsrc *gi;
	int idx;

	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);
	for (idx = 0; idx < count; idx++) {
		gi = (struct gic_irqsrc *)isrc[idx];

		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
		    ("%s: Trying to release an unused MSI-X interrupt",
		    __func__));

		/* Clear the in-use mark so the SPI can be handed out again. */
		gi->gi_flags &= ~GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->sc_mutex);

	return (0);
}
/*
 * MSI_ALLOC_MSIX method: pick the first unused SPI in the GICv2m range,
 * mark it used and return its irqsrc, with the parent GIC as the
 * handling PIC.  Returns ENXIO when the range is exhausted.
 */
static int
arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct arm_gicv2m_softc *sc;
	struct arm_gic_softc *psc;
	int irq;

	/* The interrupt state lives in the parent GIC's softc. */
	psc = device_get_softc(device_get_parent(dev));
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);
	/* Find an unused interrupt */
	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
		KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
		    ("%s: Non-MSI interrupt found", __func__));
		if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
			break;
	}

	/* No free interrupt was found */
	if (irq == sc->sc_spi_end) {
		mtx_unlock(&sc->sc_mutex);
		return (ENXIO);
	}

	/* Mark the interrupt as used */
	psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
	mtx_unlock(&sc->sc_mutex);

	*isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
	*pic = device_get_parent(dev);

	return (0);
}
/*
 * MSI_RELEASE_MSIX method: return a single MSI-X interrupt to the free
 * pool.
 */
static int
arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct arm_gicv2m_softc *sc;
	struct gic_irqsrc *gi;

	sc = device_get_softc(dev);
	gi = (struct gic_irqsrc *)isrc;

	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
	    ("%s: Trying to release an unused MSI-X interrupt", __func__));

	mtx_lock(&sc->sc_mutex);
	/* Clear the in-use mark so the SPI can be handed out again. */
	gi->gi_flags &= ~GI_FLAG_MSI_USED;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}
/*
 * MSI_MAP_MSI method: report the doorbell address/data pair a device
 * must write to raise this MSI — the physical address of the frame's
 * SETSPI register, with the SPI number as the payload.
 */
static int
arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct arm_gicv2m_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
	*data = gi->gi_irq;

	return (0);
}
/* Newbus method table for the GICv2m MSI frame driver. */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};
/* Base driver class for the GICv2m MSI frame. */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
#endif
Index: head/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c
===================================================================
--- head/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c (revision 327172)
+++ head/sys/arm/broadcom/bcm2835/bcm2835_cpufreq.c (revision 327173)
@@ -1,1640 +1,1642 @@
/*-
* Copyright (C) 2013-2015 Daisuke Aoyama <aoyama@peach.ne.jp>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sema.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <arm/broadcom/bcm2835/bcm2835_mbox.h>
#include <arm/broadcom/bcm2835/bcm2835_mbox_prop.h>
#include <arm/broadcom/bcm2835/bcm2835_vcbus.h>
#include "cpufreq_if.h"
#include "mbox_if.h"
#ifdef DEBUG
#define DPRINTF(fmt, ...) do { \
printf("%s:%u: ", __func__, __LINE__); \
printf(fmt, ##__VA_ARGS__); \
} while (0)
#else
#define DPRINTF(fmt, ...)
#endif
#define HZ2MHZ(freq) ((freq) / (1000 * 1000))
#define MHZ2HZ(freq) ((freq) * (1000 * 1000))
#ifdef SOC_BCM2835
#define OFFSET2MVOLT(val) (1200 + ((val) * 25))
#define MVOLT2OFFSET(val) (((val) - 1200) / 25)
#define DEFAULT_ARM_FREQUENCY 700
#define DEFAULT_LOWEST_FREQ 300
#else
#define OFFSET2MVOLT(val) (((val) / 1000))
#define MVOLT2OFFSET(val) (((val) * 1000))
#define DEFAULT_ARM_FREQUENCY 600
#define DEFAULT_LOWEST_FREQ 600
#endif
#define DEFAULT_CORE_FREQUENCY 250
#define DEFAULT_SDRAM_FREQUENCY 400
#define TRANSITION_LATENCY 1000
#define MIN_OVER_VOLTAGE -16
#define MAX_OVER_VOLTAGE 6
#define MSG_ERROR -999999999
#define MHZSTEP 100
#define HZSTEP (MHZ2HZ(MHZSTEP))
#define TZ_ZEROC 2731
#define VC_LOCK(sc) do { \
sema_wait(&vc_sema); \
} while (0)
#define VC_UNLOCK(sc) do { \
sema_post(&vc_sema); \
} while (0)
/* ARM->VC mailbox property semaphore (acquired via VC_LOCK/VC_UNLOCK) */
static struct sema vc_sema;

/* sysctl context owning the nodes this driver creates */
static struct sysctl_ctx_list bcm2835_sysctl_ctx;
/* Per-device state for the BCM2835 VideoCore cpufreq driver. */
struct bcm2835_cpufreq_softc {
	device_t	dev;

	/* clock limits cached from the firmware at init time */
	int		arm_max_freq;
	int		arm_min_freq;
	int		core_max_freq;
	int		core_min_freq;
	int		sdram_max_freq;
	int		sdram_min_freq;
	int		max_voltage_core;
	int		min_voltage_core;

	/* the values written in mbox */
	int		voltage_core;
	int		voltage_sdram;
	int		voltage_sdram_c;
	int		voltage_sdram_i;
	int		voltage_sdram_p;
	int		turbo_mode;

	/* initial hook for waiting mbox intr */
	struct intr_config_hook	init_hook;
};
/* Devicetree "compatible" strings this driver matches. */
static struct ofw_compat_data compat_data[] = {
	{ "broadcom,bcm2835-vc",	1 },
	{ "broadcom,bcm2708-vc",	1 },
	{ "brcm,bcm2709",	1 },
	{ "brcm,bcm2836",	1 },
	{ NULL, 0 }
};
static int cpufreq_verbose = 0;
TUNABLE_INT("hw.bcm2835.cpufreq.verbose", &cpufreq_verbose);
static int cpufreq_lowest_freq = DEFAULT_LOWEST_FREQ;
TUNABLE_INT("hw.bcm2835.cpufreq.lowest_freq", &cpufreq_lowest_freq);
#ifdef PROP_DEBUG
/*
 * Debug helper: hex-dump 'len' bytes at 'data', with an extra space
 * after every 4 bytes and a newline after every 16.
 */
static void
bcm2835_dump(const void *data, int len)
{
	const uint8_t *bytes = (const uint8_t*)data;
	int idx;

	printf("dump @ %p:\n", data);
	for (idx = 0; idx < len; idx++) {
		printf("%2.2x ", bytes[idx]);
		/* Visual grouping: gap every 4 bytes, break every 16. */
		if ((idx % 4) == 3)
			printf(" ");
		if ((idx % 16) == 15)
			printf("\n");
	}
	printf("\n");
}
#endif
/*
 * Query the firmware for the current rate of clock 'clock_id'.
 * Returns the rate in Hz, or MSG_ERROR on mailbox failure.  Callers
 * serialize mailbox access with VC_LOCK.
 */
static int
bcm2835_cpufreq_get_clock_rate(struct bcm2835_cpufreq_softc *sc,
    uint32_t clock_id)
{
	struct msg_get_clock_rate msg;
	int rate;
	int err;

	/*
	 * Get clock rate
	 *   Tag: 0x00030002
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: clock id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: clock id
	 *       u32: rate (in Hz)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_CLOCK_RATE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.clock_id = clock_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get clock rate (id=%u)\n",
		    clock_id);
		return (MSG_ERROR);
	}

	/* result (Hz) */
	rate = (int)msg.body.resp.rate_hz;
	DPRINTF("clock = %d(Hz)\n", rate);
	return (rate);
}
/*
 * Query the firmware for the maximum supported rate of clock
 * 'clock_id'.  Returns the rate in Hz, or MSG_ERROR on mailbox failure.
 */
static int
bcm2835_cpufreq_get_max_clock_rate(struct bcm2835_cpufreq_softc *sc,
    uint32_t clock_id)
{
	struct msg_get_max_clock_rate msg;
	int rate;
	int err;

	/*
	 * Get max clock rate
	 *   Tag: 0x00030004
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: clock id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: clock id
	 *       u32: rate (in Hz)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_MAX_CLOCK_RATE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.clock_id = clock_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get max clock rate (id=%u)\n",
		    clock_id);
		return (MSG_ERROR);
	}

	/* result (Hz) */
	rate = (int)msg.body.resp.rate_hz;
	DPRINTF("clock = %d(Hz)\n", rate);
	return (rate);
}
/*
 * Query the firmware for the minimum supported rate of clock
 * 'clock_id'.  Returns the rate in Hz, or MSG_ERROR on mailbox failure.
 */
static int
bcm2835_cpufreq_get_min_clock_rate(struct bcm2835_cpufreq_softc *sc,
    uint32_t clock_id)
{
	struct msg_get_min_clock_rate msg;
	int rate;
	int err;

	/*
	 * Get min clock rate
	 *   Tag: 0x00030007
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: clock id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: clock id
	 *       u32: rate (in Hz)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_MIN_CLOCK_RATE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.clock_id = clock_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get min clock rate (id=%u)\n",
		    clock_id);
		return (MSG_ERROR);
	}

	/* result (Hz) */
	rate = (int)msg.body.resp.rate_hz;
	DPRINTF("clock = %d(Hz)\n", rate);
	return (rate);
}
/*
 * Ask the firmware to set clock 'clock_id' to 'rate_hz'.  Returns the
 * rate the firmware reports back (Hz), or MSG_ERROR on mailbox failure.
 * The core clock is written twice as a workaround (see XXX below).
 */
static int
bcm2835_cpufreq_set_clock_rate(struct bcm2835_cpufreq_softc *sc,
    uint32_t clock_id, uint32_t rate_hz)
{
	struct msg_set_clock_rate msg;
	int rate;
	int err;

	/*
	 * Set clock rate
	 *   Tag: 0x00038002
	 *   Request:
	 *     Length: 8
	 *     Value:
	 *       u32: clock id
	 *       u32: rate (in Hz)
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: clock id
	 *       u32: rate (in Hz)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_SET_CLOCK_RATE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.clock_id = clock_id;
	msg.body.req.rate_hz = rate_hz;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't set clock rate (id=%u)\n",
		    clock_id);
		return (MSG_ERROR);
	}

	/* workaround for core clock */
	if (clock_id == BCM2835_MBOX_CLOCK_ID_CORE) {
		/* for safety (may change voltage without changing clock) */
		DELAY(TRANSITION_LATENCY);

		/*
		 * XXX: the core clock is unable to change at once,
		 * to change certainly, write it twice now.
		 */

		/* setup single tag buffer */
		memset(&msg, 0, sizeof(msg));
		msg.hdr.buf_size = sizeof(msg);
		msg.hdr.code = BCM2835_MBOX_CODE_REQ;
		msg.tag_hdr.tag = BCM2835_MBOX_TAG_SET_CLOCK_RATE;
		msg.tag_hdr.val_buf_size = sizeof(msg.body);
		msg.tag_hdr.val_len = sizeof(msg.body.req);
		msg.body.req.clock_id = clock_id;
		msg.body.req.rate_hz = rate_hz;
		msg.end_tag = 0;

		/* call mailbox property */
		err = bcm2835_mbox_property(&msg, sizeof(msg));
		if (err) {
			device_printf(sc->dev,
			    "can't set clock rate (id=%u)\n", clock_id);
			return (MSG_ERROR);
		}
	}

	/* result (Hz) */
	rate = (int)msg.body.resp.rate_hz;
	DPRINTF("clock = %d(Hz)\n", rate);
	return (rate);
}
/*
 * Query the firmware turbo state.  Returns 0 (non-turbo) or 1 (turbo),
 * or MSG_ERROR on mailbox failure.
 */
static int
bcm2835_cpufreq_get_turbo(struct bcm2835_cpufreq_softc *sc)
{
	struct msg_get_turbo msg;
	int level;
	int err;

	/*
	 * Get turbo
	 *   Tag: 0x00030009
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: id
	 *       u32: level
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_TURBO;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.id = 0;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get turbo\n");
		return (MSG_ERROR);
	}

	/* result 0=non-turbo, 1=turbo */
	level = (int)msg.body.resp.level;
	DPRINTF("level = %d\n", level);
	return (level);
}
/*
 * Set the firmware turbo state.  Unknown 'level' values are coerced to
 * OFF.  Returns the level the firmware reports back, or MSG_ERROR on
 * mailbox failure.
 */
static int
bcm2835_cpufreq_set_turbo(struct bcm2835_cpufreq_softc *sc, uint32_t level)
{
	struct msg_set_turbo msg;
	int value;
	int err;

	/*
	 * Set turbo
	 *   Tag: 0x00038009
	 *   Request:
	 *     Length: 8
	 *     Value:
	 *       u32: id
	 *       u32: level
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: id
	 *       u32: level
	 */

	/* replace unknown value to OFF */
	if (level != BCM2835_MBOX_TURBO_ON && level != BCM2835_MBOX_TURBO_OFF)
		level = BCM2835_MBOX_TURBO_OFF;

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_SET_TURBO;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.id = 0;
	msg.body.req.level = level;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't set turbo\n");
		return (MSG_ERROR);
	}

	/* result 0=non-turbo, 1=turbo */
	value = (int)msg.body.resp.level;
	DPRINTF("level = %d\n", value);
	return (value);
}
/*
 * Query the firmware for the voltage of rail 'voltage_id'.  Returns the
 * raw firmware value (see OFFSET2MVOLT for scaling), or MSG_ERROR on
 * mailbox failure.
 */
static int
bcm2835_cpufreq_get_voltage(struct bcm2835_cpufreq_softc *sc,
    uint32_t voltage_id)
{
	struct msg_get_voltage msg;
	int value;
	int err;

	/*
	 * Get voltage
	 *   Tag: 0x00030003
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: voltage id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: voltage id
	 *       u32: value (offset from 1.2V in units of 0.025V)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_VOLTAGE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.voltage_id = voltage_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get voltage\n");
		return (MSG_ERROR);
	}

	/* result (offset from 1.2V) */
	value = (int)msg.body.resp.value;
	DPRINTF("value = %d\n", value);
	return (value);
}
/*
 * Query the firmware for the maximum voltage of rail 'voltage_id'.
 * Returns the raw firmware value, or MSG_ERROR on mailbox failure.
 */
static int
bcm2835_cpufreq_get_max_voltage(struct bcm2835_cpufreq_softc *sc,
    uint32_t voltage_id)
{
	struct msg_get_max_voltage msg;
	int value;
	int err;

	/*
	 * Get max voltage
	 *   Tag: 0x00030005
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: voltage id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: voltage id
	 *       u32: value (offset from 1.2V in units of 0.025V)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_MAX_VOLTAGE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.voltage_id = voltage_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get max voltage\n");
		return (MSG_ERROR);
	}

	/* result (offset from 1.2V) */
	value = (int)msg.body.resp.value;
	DPRINTF("value = %d\n", value);
	return (value);
}
/*
 * Query the firmware for the minimum voltage of rail 'voltage_id'.
 * Returns the raw firmware value, or MSG_ERROR on mailbox failure.
 */
static int
bcm2835_cpufreq_get_min_voltage(struct bcm2835_cpufreq_softc *sc,
    uint32_t voltage_id)
{
	struct msg_get_min_voltage msg;
	int value;
	int err;

	/*
	 * Get min voltage
	 *   Tag: 0x00030008
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: voltage id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: voltage id
	 *       u32: value (offset from 1.2V in units of 0.025V)
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_MIN_VOLTAGE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.voltage_id = voltage_id;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get min voltage\n");
		return (MSG_ERROR);
	}

	/* result (offset from 1.2V) */
	value = (int)msg.body.resp.value;
	DPRINTF("value = %d\n", value);
	return (value);
}
/*
 * Set the voltage of rail 'voltage_id' to 'value' (offset from 1.2V in
 * 0.025V units).  Values outside [MIN_OVER_VOLTAGE, MAX_OVER_VOLTAGE]
 * are rejected.  Returns the value the firmware reports back, or
 * MSG_ERROR on mailbox failure or rejected input.
 */
static int
bcm2835_cpufreq_set_voltage(struct bcm2835_cpufreq_softc *sc,
    uint32_t voltage_id, int32_t value)
{
	struct msg_set_voltage msg;
	int err;

	/*
	 * Set voltage
	 *   Tag: 0x00038003
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: voltage id
	 *       u32: value (offset from 1.2V in units of 0.025V)
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: voltage id
	 *       u32: value (offset from 1.2V in units of 0.025V)
	 */

	/*
	 * over_voltage:
	 * 0 (1.2 V). Values above 6 are only allowed when force_turbo or
	 * current_limit_override are specified (which set the warranty bit).
	 */
	if (value > MAX_OVER_VOLTAGE || value < MIN_OVER_VOLTAGE) {
		/* currently not supported */
		device_printf(sc->dev, "not supported voltage: %d\n", value);
		return (MSG_ERROR);
	}

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_SET_VOLTAGE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.voltage_id = voltage_id;
	msg.body.req.value = (uint32_t)value;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't set voltage\n");
		return (MSG_ERROR);
	}

	/* result (offset from 1.2V) */
	value = (int)msg.body.resp.value;
	DPRINTF("value = %d\n", value);
	return (value);
}
/*
 * Query the firmware for the SoC temperature.  Returns the raw value
 * (callers treat it as milli-degrees Celsius — see the /100 + TZ_ZEROC
 * conversion in sysctl_bcm2835_devcpu_temperature), or MSG_ERROR on
 * mailbox failure.
 */
static int
bcm2835_cpufreq_get_temperature(struct bcm2835_cpufreq_softc *sc)
{
	struct msg_get_temperature msg;
	int value;
	int err;

	/*
	 * Get temperature
	 *   Tag: 0x00030006
	 *   Request:
	 *     Length: 4
	 *     Value:
	 *       u32: temperature id
	 *   Response:
	 *     Length: 8
	 *     Value:
	 *       u32: temperature id
	 *       u32: value
	 */

	/* setup single tag buffer */
	memset(&msg, 0, sizeof(msg));
	msg.hdr.buf_size = sizeof(msg);
	msg.hdr.code = BCM2835_MBOX_CODE_REQ;
	msg.tag_hdr.tag = BCM2835_MBOX_TAG_GET_TEMPERATURE;
	msg.tag_hdr.val_buf_size = sizeof(msg.body);
	msg.tag_hdr.val_len = sizeof(msg.body.req);
	msg.body.req.temperature_id = 0;
	msg.end_tag = 0;

	/* call mailbox property */
	err = bcm2835_mbox_property(&msg, sizeof(msg));
	if (err) {
		device_printf(sc->dev, "can't get temperature\n");
		return (MSG_ERROR);
	}

	/* result (temperature of degree C) */
	value = (int)msg.body.resp.value;
	DPRINTF("value = %d\n", value);
	return (value);
}
static int
sysctl_bcm2835_cpufreq_arm_freq(SYSCTL_HANDLER_ARGS)
{
struct bcm2835_cpufreq_softc *sc = arg1;
int val;
int err;
/* get realtime value */
VC_LOCK(sc);
val = bcm2835_cpufreq_get_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_ARM);
VC_UNLOCK(sc);
if (val == MSG_ERROR)
return (EIO);
err = sysctl_handle_int(oidp, &val, 0, req);
if (err || !req->newptr) /* error || read request */
return (err);
/* write request */
VC_LOCK(sc);
err = bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_ARM,
val);
VC_UNLOCK(sc);
if (err == MSG_ERROR) {
device_printf(sc->dev, "set clock arm_freq error\n");
return (EIO);
}
DELAY(TRANSITION_LATENCY);
return (0);
}
/*
 * sysctl handler for the VC core clock: reads report the live rate from
 * the firmware, writes program a new rate.
 *
 * Consistency fix: the error path used to unlock inside the error
 * branch (holding the VC semaphore across device_printf); it now
 * unlocks immediately after the mailbox call, matching every other
 * handler in this file (arm_freq, sdram_freq, turbo, ...).
 */
static int
sysctl_bcm2835_cpufreq_core_freq(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_CORE);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_CORE,
	    val);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set clock core_freq error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for the SDRAM clock: reads report the live rate from
 * the firmware, writes program a new rate.
 */
static int
sysctl_bcm2835_cpufreq_sdram_freq(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_SDRAM);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_SDRAM,
	    val);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set clock sdram_freq error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for turbo mode: reads report the firmware state,
 * writes treat any positive value as ON and everything else as OFF.
 */
static int
sysctl_bcm2835_cpufreq_turbo(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_turbo(sc);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	if (val > 0)
		sc->turbo_mode = BCM2835_MBOX_TURBO_ON;
	else
		sc->turbo_mode = BCM2835_MBOX_TURBO_OFF;

	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_turbo(sc, sc->turbo_mode);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set turbo error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for the core voltage offset (firmware units, offset
 * from 1.2V in 0.025V steps); range-checked against the over-voltage
 * limits before writing.
 */
static int
sysctl_bcm2835_cpufreq_voltage_core(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_CORE);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	if (val > MAX_OVER_VOLTAGE || val < MIN_OVER_VOLTAGE)
		return (EINVAL);
	sc->voltage_core = val;

	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_CORE,
	    sc->voltage_core);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set voltage core error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for the SDRAM controller voltage offset; same
 * semantics and range checks as the core-voltage handler.
 */
static int
sysctl_bcm2835_cpufreq_voltage_sdram_c(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_C);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	if (val > MAX_OVER_VOLTAGE || val < MIN_OVER_VOLTAGE)
		return (EINVAL);
	sc->voltage_sdram_c = val;

	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_C,
	    sc->voltage_sdram_c);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set voltage sdram_c error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for the SDRAM I/O voltage offset; same semantics and
 * range checks as the core-voltage handler.
 */
static int
sysctl_bcm2835_cpufreq_voltage_sdram_i(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_I);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	if (val > MAX_OVER_VOLTAGE || val < MIN_OVER_VOLTAGE)
		return (EINVAL);
	sc->voltage_sdram_i = val;

	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_I,
	    sc->voltage_sdram_i);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set voltage sdram_i error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * sysctl handler for the SDRAM PHY voltage offset; same semantics and
 * range checks as the core-voltage handler.
 */
static int
sysctl_bcm2835_cpufreq_voltage_sdram_p(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* get realtime value */
	VC_LOCK(sc);
	val = bcm2835_cpufreq_get_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_P);
	VC_UNLOCK(sc);
	if (val == MSG_ERROR)
		return (EIO);

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	/* write request */
	if (val > MAX_OVER_VOLTAGE || val < MIN_OVER_VOLTAGE)
		return (EINVAL);
	sc->voltage_sdram_p = val;

	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_P,
	    sc->voltage_sdram_p);
	VC_UNLOCK(sc);
	if (err == MSG_ERROR) {
		device_printf(sc->dev, "set voltage sdram_p error\n");
		return (EIO);
	}
	DELAY(TRANSITION_LATENCY);

	return (0);
}
/*
 * Write-only sysctl: set all three SDRAM voltage rails (controller,
 * I/O, PHY) to the same offset in one operation.  Reads return EINVAL
 * since there is no single "the" SDRAM voltage to report.
 */
static int
sysctl_bcm2835_cpufreq_voltage_sdram(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_cpufreq_softc *sc = arg1;
	int val;
	int err;

	/* multiple write only */
	if (!req->newptr)
		return (EINVAL);
	val = 0;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err)
		return (err);

	/* write request */
	if (val > MAX_OVER_VOLTAGE || val < MIN_OVER_VOLTAGE)
		return (EINVAL);
	sc->voltage_sdram = val;

	/* All three rails are updated under a single lock acquisition. */
	VC_LOCK(sc);
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_C,
	    val);
	if (err == MSG_ERROR) {
		VC_UNLOCK(sc);
		device_printf(sc->dev, "set voltage sdram_c error\n");
		return (EIO);
	}
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_I,
	    val);
	if (err == MSG_ERROR) {
		VC_UNLOCK(sc);
		device_printf(sc->dev, "set voltage sdram_i error\n");
		return (EIO);
	}
	err = bcm2835_cpufreq_set_voltage(sc, BCM2835_MBOX_VOLTAGE_ID_SDRAM_P,
	    val);
	if (err == MSG_ERROR) {
		VC_UNLOCK(sc);
		device_printf(sc->dev, "set voltage sdram_p error\n");
		return (EIO);
	}
	VC_UNLOCK(sc);
	DELAY(TRANSITION_LATENCY);

	return (0);
}
static int
sysctl_bcm2835_cpufreq_temperature(SYSCTL_HANDLER_ARGS)
{
struct bcm2835_cpufreq_softc *sc = arg1;
int val;
int err;
/* get realtime value */
VC_LOCK(sc);
val = bcm2835_cpufreq_get_temperature(sc);
VC_UNLOCK(sc);
if (val == MSG_ERROR)
return (EIO);
err = sysctl_handle_int(oidp, &val, 0, req);
if (err || !req->newptr) /* error || read request */
return (err);
/* write request */
return (EINVAL);
}
static int
sysctl_bcm2835_devcpu_temperature(SYSCTL_HANDLER_ARGS)
{
struct bcm2835_cpufreq_softc *sc = arg1;
int val;
int err;
/* get realtime value */
VC_LOCK(sc);
val = bcm2835_cpufreq_get_temperature(sc);
VC_UNLOCK(sc);
if (val == MSG_ERROR)
return (EIO);
/* 1/1000 celsius (raw) to 1/10 kelvin */
val = val / 100 + TZ_ZEROC;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err || !req->newptr) /* error || read request */
return (err);
/* write request */
return (EINVAL);
}
/*
 * Deferred initialization, run as a config_intrhook callback once
 * interrupts are enabled (the VC mailbox needs them).  Queries the
 * VideoCore firmware for current/min/max clocks and voltages, caches
 * the limits in the softc, applies either max (turbo) or min settings,
 * and attaches a human-readable temperature sysctl to the parent
 * dev.cpu node.  The whole firmware conversation runs under one
 * VC_LOCK/VC_UNLOCK critical section.
 */
static void
bcm2835_cpufreq_init(void *arg)
{
struct bcm2835_cpufreq_softc *sc = arg;
struct sysctl_ctx_list *ctx;
device_t cpu;
int arm_freq, core_freq, sdram_freq;
int arm_max_freq, arm_min_freq, core_max_freq, core_min_freq;
int sdram_max_freq, sdram_min_freq;
int voltage_core, voltage_sdram_c, voltage_sdram_i, voltage_sdram_p;
int max_voltage_core, min_voltage_core;
int max_voltage_sdram_c, min_voltage_sdram_c;
int max_voltage_sdram_i, min_voltage_sdram_i;
int max_voltage_sdram_p, min_voltage_sdram_p;
int turbo, temperature;
VC_LOCK(sc);
/* current clock */
arm_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM);
core_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE);
sdram_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM);
/* max/min clock */
arm_max_freq = bcm2835_cpufreq_get_max_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM);
arm_min_freq = bcm2835_cpufreq_get_min_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM);
core_max_freq = bcm2835_cpufreq_get_max_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE);
core_min_freq = bcm2835_cpufreq_get_min_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE);
sdram_max_freq = bcm2835_cpufreq_get_max_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM);
sdram_min_freq = bcm2835_cpufreq_get_min_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM);
/* turbo mode */
turbo = bcm2835_cpufreq_get_turbo(sc);
if (turbo > 0)
sc->turbo_mode = BCM2835_MBOX_TURBO_ON;
else
sc->turbo_mode = BCM2835_MBOX_TURBO_OFF;
/* voltage */
voltage_core = bcm2835_cpufreq_get_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_CORE);
voltage_sdram_c = bcm2835_cpufreq_get_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_C);
voltage_sdram_i = bcm2835_cpufreq_get_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_I);
voltage_sdram_p = bcm2835_cpufreq_get_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_P);
/* current values (offset from 1.2V) */
/* NOTE: the aggregate voltage_sdram is seeded from the C rail only. */
sc->voltage_core = voltage_core;
sc->voltage_sdram = voltage_sdram_c;
sc->voltage_sdram_c = voltage_sdram_c;
sc->voltage_sdram_i = voltage_sdram_i;
sc->voltage_sdram_p = voltage_sdram_p;
/* max/min voltage */
max_voltage_core = bcm2835_cpufreq_get_max_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_CORE);
min_voltage_core = bcm2835_cpufreq_get_min_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_CORE);
max_voltage_sdram_c = bcm2835_cpufreq_get_max_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_C);
max_voltage_sdram_i = bcm2835_cpufreq_get_max_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_I);
max_voltage_sdram_p = bcm2835_cpufreq_get_max_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_P);
min_voltage_sdram_c = bcm2835_cpufreq_get_min_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_C);
min_voltage_sdram_i = bcm2835_cpufreq_get_min_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_I);
min_voltage_sdram_p = bcm2835_cpufreq_get_min_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_SDRAM_P);
/* temperature */
temperature = bcm2835_cpufreq_get_temperature(sc);
/* show result */
if (cpufreq_verbose || bootverbose) {
device_printf(sc->dev, "Boot settings:\n");
device_printf(sc->dev,
"current ARM %dMHz, Core %dMHz, SDRAM %dMHz, Turbo %s\n",
HZ2MHZ(arm_freq), HZ2MHZ(core_freq), HZ2MHZ(sdram_freq),
(sc->turbo_mode == BCM2835_MBOX_TURBO_ON) ? "ON" : "OFF");
device_printf(sc->dev,
"max/min ARM %d/%dMHz, Core %d/%dMHz, SDRAM %d/%dMHz\n",
HZ2MHZ(arm_max_freq), HZ2MHZ(arm_min_freq),
HZ2MHZ(core_max_freq), HZ2MHZ(core_min_freq),
HZ2MHZ(sdram_max_freq), HZ2MHZ(sdram_min_freq));
device_printf(sc->dev,
"current Core %dmV, SDRAM_C %dmV, SDRAM_I %dmV, "
"SDRAM_P %dmV\n",
OFFSET2MVOLT(voltage_core), OFFSET2MVOLT(voltage_sdram_c),
OFFSET2MVOLT(voltage_sdram_i),
OFFSET2MVOLT(voltage_sdram_p));
device_printf(sc->dev,
"max/min Core %d/%dmV, SDRAM_C %d/%dmV, SDRAM_I %d/%dmV, "
"SDRAM_P %d/%dmV\n",
OFFSET2MVOLT(max_voltage_core),
OFFSET2MVOLT(min_voltage_core),
OFFSET2MVOLT(max_voltage_sdram_c),
OFFSET2MVOLT(min_voltage_sdram_c),
OFFSET2MVOLT(max_voltage_sdram_i),
OFFSET2MVOLT(min_voltage_sdram_i),
OFFSET2MVOLT(max_voltage_sdram_p),
OFFSET2MVOLT(min_voltage_sdram_p));
/* raw value is 1/1000 degC; print with one decimal place */
device_printf(sc->dev,
"Temperature %d.%dC\n", (temperature / 1000),
(temperature % 1000) / 100);
} else { /* !cpufreq_verbose && !bootverbose */
device_printf(sc->dev,
"ARM %dMHz, Core %dMHz, SDRAM %dMHz, Turbo %s\n",
HZ2MHZ(arm_freq), HZ2MHZ(core_freq), HZ2MHZ(sdram_freq),
(sc->turbo_mode == BCM2835_MBOX_TURBO_ON) ? "ON" : "OFF");
}
/* keep in softc (MHz/mV) */
sc->arm_max_freq = HZ2MHZ(arm_max_freq);
sc->arm_min_freq = HZ2MHZ(arm_min_freq);
sc->core_max_freq = HZ2MHZ(core_max_freq);
sc->core_min_freq = HZ2MHZ(core_min_freq);
sc->sdram_max_freq = HZ2MHZ(sdram_max_freq);
sc->sdram_min_freq = HZ2MHZ(sdram_min_freq);
sc->max_voltage_core = OFFSET2MVOLT(max_voltage_core);
sc->min_voltage_core = OFFSET2MVOLT(min_voltage_core);
/* if turbo is on, set to max values */
/* each clock change is followed by a settle delay */
if (sc->turbo_mode == BCM2835_MBOX_TURBO_ON) {
bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_ARM,
arm_max_freq);
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_CORE,
core_max_freq);
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM, sdram_max_freq);
DELAY(TRANSITION_LATENCY);
} else {
bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_ARM,
arm_min_freq);
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc, BCM2835_MBOX_CLOCK_ID_CORE,
core_min_freq);
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM, sdram_min_freq);
DELAY(TRANSITION_LATENCY);
}
VC_UNLOCK(sc);
/* add human readable temperature to dev.cpu node */
cpu = device_get_parent(sc->dev);
if (cpu != NULL) {
ctx = device_get_sysctl_ctx(cpu);
SYSCTL_ADD_PROC(ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(cpu)), OID_AUTO,
"temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0,
sysctl_bcm2835_devcpu_temperature, "IK",
"Current SoC temperature");
}
/* release this hook (continue boot) */
config_intrhook_disestablish(&sc->init_hook);
}
static void
bcm2835_cpufreq_identify(driver_t *driver, device_t parent)
{
	const struct ofw_compat_data *ocd;
	phandle_t root;

	/* Bail out unless the FDT root node matches a supported SoC. */
	root = OF_finddevice("/");
	ocd = compat_data;
	while (ocd->ocd_str != NULL &&
	    !ofw_bus_node_is_compatible(root, ocd->ocd_str))
		ocd++;
	if (ocd->ocd_data == 0)
		return;

	DPRINTF("driver=%p, parent=%p\n", driver, parent);

	/* Attach at most one child. */
	if (device_find_child(parent, "bcm2835_cpufreq", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "bcm2835_cpufreq", -1) == NULL)
		device_printf(parent, "add child failed\n");
}
static int
bcm2835_cpufreq_probe(device_t dev)
{

	/* Only a single instance (unit 0) is supported. */
	if (device_get_unit(dev) != 0)
		return (ENXIO);
	device_set_desc(dev, "CPU Frequency Control");

	return (0);
}
/*
 * Device attach: record the softc, create the hw.cpufreq sysctl tree
 * (first unit only), initialize the ARM<->VC semaphore, and defer the
 * firmware conversation to bcm2835_cpufreq_init() via config_intrhook
 * so the mailbox is used only after interrupts are enabled.  Finally
 * registers with cpufreq(4).
 *
 * Fix: the "voltage_core" and "voltage_sdram_c" sysctl descriptions
 * were built from adjacent string literals with no separating space,
 * rendering as "...voltage(offset from 1.2V...".  A space is added.
 */
static int
bcm2835_cpufreq_attach(device_t dev)
{
	struct bcm2835_cpufreq_softc *sc;
	struct sysctl_oid *oid;

	/* set self dev */
	sc = device_get_softc(dev);
	sc->dev = dev;

	/* initial values; real limits are filled in by the init hook */
	sc->arm_max_freq = -1;
	sc->arm_min_freq = -1;
	sc->core_max_freq = -1;
	sc->core_min_freq = -1;
	sc->sdram_max_freq = -1;
	sc->sdram_min_freq = -1;
	sc->max_voltage_core = 0;
	sc->min_voltage_core = 0;

	/* setup sysctl at first device */
	if (device_get_unit(dev) == 0) {
		sysctl_ctx_init(&bcm2835_sysctl_ctx);
		/* create node for hw.cpufreq */
		oid = SYSCTL_ADD_NODE(&bcm2835_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "cpufreq",
		    CTLFLAG_RD, NULL, "");

		/* Frequency (Hz) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "arm_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_arm_freq, "IU",
		    "ARM frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "core_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_core_freq, "IU",
		    "Core frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "sdram_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_sdram_freq, "IU",
		    "SDRAM frequency (Hz)");

		/* Turbo state */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "turbo", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_turbo, "IU",
		    "Disables dynamic clocking");

		/* Voltage (offset from 1.2V in units of 0.025V) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_core", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_voltage_core, "I",
		    "ARM/GPU core voltage "
		    "(offset from 1.2V in units of 0.025V)");
		/* write-only: fans one value out to all three SDRAM rails */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram", CTLTYPE_INT | CTLFLAG_WR, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram, "I",
		    "SDRAM voltage (offset from 1.2V in units of 0.025V)");

		/* Voltage individual SDRAM */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_c", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_c, "I",
		    "SDRAM controller voltage "
		    "(offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_i", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_i, "I",
		    "SDRAM I/O voltage (offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_p", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_p, "I",
		    "SDRAM phy voltage (offset from 1.2V in units of 0.025V)");

		/* Temperature */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0,
		    sysctl_bcm2835_cpufreq_temperature, "I",
		    "SoC temperature (thousandths of a degree C)");
	}

	/* ARM->VC lock */
	sema_init(&vc_sema, 1, "vcsema");

	/* register callback for using mbox when interrupts are enabled */
	sc->init_hook.ich_func = bcm2835_cpufreq_init;
	sc->init_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->init_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		return (ENOMEM);
	}

	/* this device is controlled by cpufreq(4) */
	cpufreq_register(dev);

	return (0);
}
/*
 * Device detach: destroy the ARM<->VC semaphore and deregister from
 * cpufreq(4).  NOTE(review): the '-' prefixed lines below are
 * unified-diff residue from the upstream revision that dropped the
 * unused softc lookup; they are preserved verbatim here.
 */
static int
bcm2835_cpufreq_detach(device_t dev)
{
- struct bcm2835_cpufreq_softc *sc;
- sc = device_get_softc(dev);
-
sema_destroy(&vc_sema);
return (cpufreq_unregister(dev));
}
/*
 * cpufreq(4) "set" method: round the requested frequency down to an
 * HZSTEP multiple, range-check it against the cached min/max, program
 * the ARM clock via the VC mailbox, and (when not in turbo mode) step
 * the core and SDRAM clocks to their max or min to match.  Returns 0
 * on success, EINVAL on a bad request, EIO when the firmware reports
 * a different frequency than requested.
 *
 * NOTE(review): the '+'/'-' prefixed lines are unified-diff residue
 * from the upstream change that made cur_freq DEBUG-only (it is used
 * solely by the final DPRINTF); they are preserved verbatim.
 */
static int
bcm2835_cpufreq_set(device_t dev, const struct cf_setting *cf)
{
struct bcm2835_cpufreq_softc *sc;
uint32_t rate_hz, rem;
- int cur_freq, resp_freq, arm_freq, min_freq, core_freq;
+ int resp_freq, arm_freq, min_freq, core_freq;
+#ifdef DEBUG
+ int cur_freq;
+#endif
if (cf == NULL || cf->freq < 0)
return (EINVAL);
sc = device_get_softc(dev);
/* setting clock (Hz) */
/* round the request down to a whole HZSTEP multiple */
rate_hz = (uint32_t)MHZ2HZ(cf->freq);
rem = rate_hz % HZSTEP;
rate_hz -= rem;
if (rate_hz == 0)
return (EINVAL);
/* adjust min freq */
/* outside turbo, allow going down to the cpufreq(4) global floor */
min_freq = sc->arm_min_freq;
if (sc->turbo_mode != BCM2835_MBOX_TURBO_ON)
if (min_freq > cpufreq_lowest_freq)
min_freq = cpufreq_lowest_freq;
if (rate_hz < MHZ2HZ(min_freq) || rate_hz > MHZ2HZ(sc->arm_max_freq))
return (EINVAL);
/* set new value and verify it */
VC_LOCK(sc);
+#ifdef DEBUG
cur_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM);
+#endif
resp_freq = bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM, rate_hz);
DELAY(TRANSITION_LATENCY);
arm_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_ARM);
/*
* if non-turbo and lower than or equal min_freq,
* clock down core and sdram to default first.
*/
if (sc->turbo_mode != BCM2835_MBOX_TURBO_ON) {
core_freq = bcm2835_cpufreq_get_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE);
if (rate_hz > MHZ2HZ(sc->arm_min_freq)) {
/* above minimum: run core and SDRAM at their maximums */
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE,
MHZ2HZ(sc->core_max_freq));
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM,
MHZ2HZ(sc->sdram_max_freq));
DELAY(TRANSITION_LATENCY);
} else {
if (sc->core_min_freq < DEFAULT_CORE_FREQUENCY &&
core_freq > DEFAULT_CORE_FREQUENCY) {
/* first, down to 250, then down to min */
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE,
MHZ2HZ(DEFAULT_CORE_FREQUENCY));
DELAY(TRANSITION_LATENCY);
/* reset core voltage */
bcm2835_cpufreq_set_voltage(sc,
BCM2835_MBOX_VOLTAGE_ID_CORE, 0);
DELAY(TRANSITION_LATENCY);
}
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_CORE,
MHZ2HZ(sc->core_min_freq));
DELAY(TRANSITION_LATENCY);
bcm2835_cpufreq_set_clock_rate(sc,
BCM2835_MBOX_CLOCK_ID_SDRAM,
MHZ2HZ(sc->sdram_min_freq));
DELAY(TRANSITION_LATENCY);
}
}
VC_UNLOCK(sc);
/* firmware must echo back exactly what we asked for */
if (resp_freq < 0 || arm_freq < 0 || resp_freq != arm_freq) {
device_printf(dev, "wrong freq\n");
return (EIO);
}
/* cur_freq only exists under DEBUG, so DPRINTF must compile away
otherwise — consistent with the #ifdef DEBUG declaration above */
DPRINTF("cpufreq: %d -> %d\n", cur_freq, arm_freq);
return (0);
}
static int
bcm2835_cpufreq_get(device_t dev, struct cf_setting *cf)
{
	struct bcm2835_cpufreq_softc *sc;
	int hz;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Start from an all-unknown setting. */
	memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf));
	cf->dev = NULL;

	/* get current value */
	VC_LOCK(sc);
	hz = bcm2835_cpufreq_get_clock_rate(sc,
	    BCM2835_MBOX_CLOCK_ID_ARM);
	VC_UNLOCK(sc);
	if (hz < 0) {
		device_printf(dev, "can't get clock\n");
		return (EINVAL);
	}

	/* CPU clock in MHz or 100ths of a percent. */
	cf->freq = HZ2MHZ(hz);
	/* Voltage (mV) and power (mW) are not reported. */
	cf->volts = CPUFREQ_VAL_UNKNOWN;
	cf->power = CPUFREQ_VAL_UNKNOWN;
	/* Transition latency in us. */
	cf->lat = TRANSITION_LATENCY;
	/* Driver providing this setting. */
	cf->dev = dev;

	return (0);
}
static int
bcm2835_cpufreq_make_freq_list(device_t dev, struct cf_setting *sets,
int *count)
{
struct bcm2835_cpufreq_softc *sc;
int freq, min_freq, volts, rem;
int idx;
sc = device_get_softc(dev);
freq = sc->arm_max_freq;
min_freq = sc->arm_min_freq;
/* adjust head freq to STEP */
rem = freq % MHZSTEP;
freq -= rem;
if (freq < min_freq)
freq = min_freq;
/* if non-turbo, add extra low freq */
if (sc->turbo_mode != BCM2835_MBOX_TURBO_ON)
if (min_freq > cpufreq_lowest_freq)
min_freq = cpufreq_lowest_freq;
#ifdef SOC_BCM2835
/* from freq to min_freq */
for (idx = 0; idx < *count && freq >= min_freq; idx++) {
if (freq > sc->arm_min_freq)
volts = sc->max_voltage_core;
else
volts = sc->min_voltage_core;
sets[idx].freq = freq;
sets[idx].volts = volts;
sets[idx].lat = TRANSITION_LATENCY;
sets[idx].dev = dev;
freq -= MHZSTEP;
}
#else
/* XXX RPi2 have only 900/600MHz */
idx = 0;
volts = sc->min_voltage_core;
sets[idx].freq = freq;
sets[idx].volts = volts;
sets[idx].lat = TRANSITION_LATENCY;
sets[idx].dev = dev;
idx++;