diff --git a/sys/arm/nvidia/as3722.c b/sys/arm/nvidia/as3722.c index ca5f7372aed1..ed5f47363b01 100644 --- a/sys/arm/nvidia/as3722.c +++ b/sys/arm/nvidia/as3722.c @@ -1,401 +1,406 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * AS3722 PMIC driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "regdev_if.h" #include "as3722.h" static struct ofw_compat_data compat_data[] = { {"ams,as3722", 1}, {NULL, 0}, }; #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define LOCK_INIT(_sc) sx_init(&(_sc)->lock, "as3722") #define LOCK_DESTROY(_sc) sx_destroy(&(_sc)->lock); #define ASSERT_LOCKED(_sc) sx_assert(&(_sc)->lock, SA_XLOCKED); #define ASSERT_UNLOCKED(_sc) sx_assert(&(_sc)->lock, SA_UNLOCKED); #define AS3722_DEVICE_ID 0x0C /* * Raw register access function. 
*/ int as3722_read(struct as3722_softc *sc, uint8_t reg, uint8_t *val) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, 1, val}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int as3722_read_buf(struct as3722_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int as3722_write(struct as3722_softc *sc, uint8_t reg, uint8_t val) { uint8_t data[2]; int rv; struct iic_msg msgs[1] = { {0, IIC_M_WR, 2, data}, }; msgs[0].slave = sc->bus_addr; data[0] = reg; data[1] = val; rv = iicbus_transfer(sc->dev, msgs, 1); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int as3722_write_buf(struct as3722_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t data[1]; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, data}, {0, IIC_M_WR | IIC_M_NOSTART, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; data[0] = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int as3722_modify(struct as3722_softc *sc, uint8_t reg, uint8_t clear, uint8_t set) { uint8_t val; int rv; rv = as3722_read(sc, reg, &val); if (rv != 0) return (rv); val &= ~clear; val |= set; rv = as3722_write(sc, reg, val); if (rv != 0) return (rv); return (0); } static int as3722_get_version(struct as3722_softc *sc) { uint8_t reg; int rv; /* Verify AS3722 ID and version. */ rv = RD1(sc, AS3722_ASIC_ID1, ®); if (rv != 0) return (ENXIO); if (reg != AS3722_DEVICE_ID) { device_printf(sc->dev, "Invalid chip ID is 0x%x\n", reg); return (ENXIO); } rv = RD1(sc, AS3722_ASIC_ID2, &sc->chip_rev); if (rv != 0) return (ENXIO); if (bootverbose) device_printf(sc->dev, "AS3722 rev: 0x%x\n", sc->chip_rev); return (0); } static int as3722_init(struct as3722_softc *sc) { uint32_t reg; int rv; reg = 0; if (sc->int_pullup) reg |= AS3722_INT_PULL_UP; if (sc->i2c_pullup) reg |= AS3722_I2C_PULL_UP; rv = RM1(sc, AS3722_IO_VOLTAGE, AS3722_INT_PULL_UP | AS3722_I2C_PULL_UP, reg); if (rv != 0) return (ENXIO); /* mask interrupts */ rv = WR1(sc, AS3722_INTERRUPT_MASK1, 0); if (rv != 0) return (ENXIO); rv = WR1(sc, AS3722_INTERRUPT_MASK2, 0); if (rv != 0) return (ENXIO); rv = WR1(sc, AS3722_INTERRUPT_MASK3, 0); if (rv != 0) return (ENXIO); rv = WR1(sc, AS3722_INTERRUPT_MASK4, 0); if (rv != 0) return (ENXIO); return (0); } static int as3722_parse_fdt(struct as3722_softc *sc, phandle_t node) { sc->int_pullup = OF_hasprop(node, "ams,enable-internal-int-pullup") ? 1 : 0; sc->i2c_pullup = OF_hasprop(node, "ams,enable-internal-i2c-pullup") ? 1 : 0; return 0; } static void as3722_intr(void *arg) { /* XXX Finish temperature alarms. 
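The register helpers above use the standard write-then-read pair of I2C messages, and as3722_modify() layers a read-modify-write on top of them. A stand-alone sketch of that pattern against a mocked register file (the mock_* names are illustrative, not part of the driver):

/* Mock of the read-modify-write pattern used by as3722_modify(). */
#include <stdint.h>
#include <stdio.h>

static uint8_t mock_regs[256];		/* fake device register file */

static int mock_read(uint8_t reg, uint8_t *val) { *val = mock_regs[reg]; return (0); }
static int mock_write(uint8_t reg, uint8_t val) { mock_regs[reg] = val; return (0); }

static int
mock_modify(uint8_t reg, uint8_t clear, uint8_t set)
{
	uint8_t val;
	int rv;

	rv = mock_read(reg, &val);
	if (rv != 0)
		return (rv);
	val &= ~clear;		/* clear bits first, then set, as above */
	val |= set;
	return (mock_write(reg, val));
}

int
main(void)
{
	mock_regs[0x00] = 0xF0;
	mock_modify(0x00, 0x30, 0x05);
	printf("0x%02X\n", mock_regs[0x00]);	/* prints 0xC5 */
	return (0);
}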
*/ } static int as3722_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "AS3722 PMIC"); return (BUS_PROBE_DEFAULT); } static int as3722_attach(device_t dev) { struct as3722_softc *sc; int rv, rid; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; sc->bus_addr = iicbus_get_addr(dev); node = ofw_bus_get_node(sc->dev); rv = 0; LOCK_INIT(sc); rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } rv = as3722_parse_fdt(sc, node); if (rv != 0) goto fail; rv = as3722_get_version(sc); if (rv != 0) goto fail; rv = as3722_init(sc); if (rv != 0) goto fail; rv = as3722_regulator_attach(sc, node); if (rv != 0) goto fail; rv = as3722_gpio_attach(sc, node); if (rv != 0) goto fail; rv = as3722_rtc_attach(sc, node); if (rv != 0) goto fail; fdt_pinctrl_register(dev, NULL); fdt_pinctrl_configure_by_name(dev, "default"); /* Setup interrupt. */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, as3722_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } return (bus_generic_attach(dev)); fail: if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); LOCK_DESTROY(sc); return (rv); } static int as3722_detach(device_t dev) { struct as3722_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static phandle_t as3722_gpio_get_node(device_t bus, device_t dev) { /* We only have one child, the GPIO bus, which needs our own node. 
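The detach hunk above is the substance of this patch: as3722_detach() (and dc_detach() below) now call bus_generic_detach() first and return on failure, so child drivers are gone before the interrupt handler and bus resources they may still depend on are torn down. A stub sketch of that ordering (illustrative only, not the real newbus code):

#include <errno.h>
#include <stdio.h>

static int child_busy = 1;

static int
detach_children(void)			/* stands in for bus_generic_detach() */
{
	return (child_busy ? EBUSY : 0);
}

static int
driver_detach(void)
{
	int error;

	error = detach_children();	/* children go first */
	if (error != 0)
		return (error);		/* keep our resources intact */
	printf("tearing down interrupts and resources\n");
	return (0);
}

int
main(void)
{
	printf("detach -> %d (child busy)\n", driver_detach());
	child_busy = 0;
	printf("detach -> %d\n", driver_detach());
	return (0);
}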
*/ return (ofw_bus_get_node(bus)); } static device_method_t as3722_methods[] = { /* Device interface */ DEVMETHOD(device_probe, as3722_probe), DEVMETHOD(device_attach, as3722_attach), DEVMETHOD(device_detach, as3722_detach), /* Regdev interface */ DEVMETHOD(regdev_map, as3722_regulator_map), /* RTC interface */ DEVMETHOD(clock_gettime, as3722_rtc_gettime), DEVMETHOD(clock_settime, as3722_rtc_settime), /* GPIO protocol interface */ DEVMETHOD(gpio_get_bus, as3722_gpio_get_bus), DEVMETHOD(gpio_pin_max, as3722_gpio_pin_max), DEVMETHOD(gpio_pin_getname, as3722_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, as3722_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, as3722_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, as3722_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, as3722_gpio_pin_get), DEVMETHOD(gpio_pin_set, as3722_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, as3722_gpio_pin_toggle), DEVMETHOD(gpio_map_gpios, as3722_gpio_map_gpios), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure, as3722_pinmux_configure), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, as3722_gpio_get_node), DEVMETHOD_END }; static DEFINE_CLASS_0(gpio, as3722_driver, as3722_methods, sizeof(struct as3722_softc)); EARLY_DRIVER_MODULE(as3722, iicbus, as3722_driver, NULL, NULL, 74); diff --git a/sys/arm/nvidia/drm2/tegra_dc.c b/sys/arm/nvidia/drm2/tegra_dc.c index f4168f161f5e..a8a0e3f4abf8 100644 --- a/sys/arm/nvidia/drm2/tegra_dc.c +++ b/sys/arm/nvidia/drm2/tegra_dc.c @@ -1,1438 +1,1443 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra_drm_if.h" #include "tegra_dc_if.h" #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, 4 * (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, 4 * (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "tegra_dc_wait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_dc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) #define SYNCPT_VBLANK0 26 #define SYNCPT_VBLANK1 27 #define DC_MAX_PLANES 2 /* Maximum planes */ /* DRM Formats supported by DC */ /* XXXX expand me */ static uint32_t dc_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, DRM_FORMAT_YUV420, DRM_FORMAT_YUV422, }; /* Complete description of one window (plane) */ struct dc_window { /* Source (in framebuffer) rectangle, in pixels */ u_int src_x; u_int src_y; u_int src_w; u_int src_h; /* Destination (on display) rectangle, in pixels */ u_int dst_x; u_int dst_y; u_int dst_w; u_int dst_h; /* Parsed pixel format */ u_int bits_per_pixel; bool is_yuv; /* any YUV mode */ bool is_yuv_planar; /* planar YUV mode */ uint32_t color_mode; /* DC_WIN_COLOR_DEPTH */ uint32_t swap; /* DC_WIN_BYTE_SWAP */ uint32_t surface_kind; /* DC_WINBUF_SURFACE_KIND */ uint32_t block_height; /* DC_WINBUF_SURFACE_KIND */ /* Parsed flipping, rotation is not supported for pitched modes */ bool flip_x; /* inverted X-axis */ bool flip_y; /* inverted Y-axis */ bool transpose_xy; /* swap X and Y-axis */ /* Color planes base addresses and strides */ bus_size_t base[3]; uint32_t stride[3]; /* stride[2] isn't used by HW */ }; struct dc_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; struct mtx mtx; clk_t clk_parent; clk_t clk_dc; hwreset_t hwreset_dc; int pitch_align; struct tegra_crtc tegra_crtc; struct drm_pending_vblank_event *event; struct drm_gem_object *cursor_gem; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-dc", 1}, {NULL, 0}, }; /* Convert standard drm pixel format to tegra windows parameters. 
*/ static int dc_parse_drm_format(struct tegra_fb *fb, struct dc_window *win) { struct tegra_bo *bo; uint32_t cm; uint32_t sw; bool is_yuv, is_yuv_planar; int nplanes, i; switch (fb->drm_fb.pixel_format) { case DRM_FORMAT_XBGR8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_R8G8B8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_XRGB8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B8G8R8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_RGB565: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B5G6R5; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_UYVY: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUYV: sw = BYTE_SWAP(SWAP2); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUV420: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr420P; is_yuv = true; is_yuv_planar = true; break; case DRM_FORMAT_YUV422: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422P; is_yuv = true; is_yuv_planar = true; break; default: /* Unsupported format */ return (-EINVAL); } /* Basic check of arguments. */ switch (fb->rotation) { case 0: case 180: break; case 90: /* Rotation is supported only */ case 270: /* for block linear surfaces */ if (!fb->block_linear) return (-EINVAL); break; default: return (-EINVAL); } /* XXX Add more checks (sizes, scaling...) */ if (win == NULL) return (0); win->surface_kind = fb->block_linear ? SURFACE_KIND_BL_16B2: SURFACE_KIND_PITCH; win->block_height = fb->block_height; switch (fb->rotation) { case 0: /* (0,0,0) */ win->transpose_xy = false; win->flip_x = false; win->flip_y = false; break; case 90: /* (1,0,1) */ win->transpose_xy = true; win->flip_x = false; win->flip_y = true; break; case 180: /* (0,1,1) */ win->transpose_xy = false; win->flip_x = true; win->flip_y = true; break; case 270: /* (1,1,0) */ win->transpose_xy = true; win->flip_x = true; win->flip_y = false; break; } win->flip_x ^= fb->flip_x; win->flip_y ^= fb->flip_y; win->color_mode = cm; win->swap = sw; win->bits_per_pixel = fb->drm_fb.bits_per_pixel; win->is_yuv = is_yuv; win->is_yuv_planar = is_yuv_planar; nplanes = drm_format_num_planes(fb->drm_fb.pixel_format); for (i = 0; i < nplanes; i++) { bo = fb->planes[i]; win->base[i] = bo->pbase + fb->drm_fb.offsets[i]; win->stride[i] = fb->drm_fb.pitches[i]; } return (0); } /* * Scaling functions. * * It's unclear if we want/must program the fractional portion * (aka bias) of init_dda registers, mainly when mirrored axis * modes are used. * For now, we use 1.0 as recommended by TRM. */ static inline uint32_t dc_scaling_init(uint32_t start) { return (1 << 12); } static inline uint32_t dc_scaling_incr(uint32_t src, uint32_t dst, uint32_t maxscale) { uint32_t val; val = (src - 1) << 12 ; /* 4.12 fixed float */ val /= (dst - 1); if (val > (maxscale << 12)) val = maxscale << 12; return val; } /* ------------------------------------------------------------------- * * HW Access. * */ /* * Setup pixel clock. * Minimal frequency is pixel clock, but output is free to select * any higher. 
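dc_scaling_incr() above computes the DDA step in 4.12 fixed point, (src - 1) << 12 / (dst - 1), clamped to maxscale. A stand-alone re-implementation to check the arithmetic (sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t
scaling_incr(uint32_t src, uint32_t dst, uint32_t maxscale)
{
	uint32_t val;

	val = (src - 1) << 12;		/* 4.12 fixed point */
	val /= (dst - 1);
	if (val > (maxscale << 12))
		val = maxscale << 12;
	return (val);
}

int
main(void)
{
	/* Downscaling 1920 source pixels onto 960: step should be ~2.0. */
	uint32_t v = scaling_incr(1920, 960, 4);

	printf("incr = %u (~%.4f)\n", v, v / 4096.0);	/* 8196, ~2.0009 */
	return (0);
}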
*/ static int dc_setup_clk(struct dc_softc *sc, struct drm_crtc *crtc, struct drm_display_mode *mode, uint32_t *div) { uint64_t pclk, freq; struct tegra_drm_encoder *output; struct drm_encoder *encoder; long rv; pclk = mode->clock * 1000; /* Find attached encoder */ output = NULL; list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { output = container_of(encoder, struct tegra_drm_encoder, encoder); break; } } if (output == NULL) return (-ENODEV); if (output->setup_clock == NULL) panic("Output have not setup_clock function.\n"); rv = output->setup_clock(output, sc->clk_dc, pclk); if (rv != 0) { device_printf(sc->dev, "Cannot setup pixel clock: %llu\n", pclk); return (rv); } rv = clk_get_freq(sc->clk_dc, &freq); *div = (freq * 2 / pclk) - 2; DRM_DEBUG_KMS("frequency: %llu, DC divider: %u\n", freq, *div); return 0; } static void dc_setup_window(struct dc_softc *sc, unsigned int index, struct dc_window *win) { uint32_t h_offset, v_offset, h_size, v_size, bpp; uint32_t h_init_dda, v_init_dda, h_incr_dda, v_incr_dda; uint32_t val; #ifdef DMR_DEBUG_WINDOW printf("%s window: %d\n", __func__, index); printf(" src: x: %d, y: %d, w: %d, h: %d\n", win->src_x, win->src_y, win->src_w, win->src_h); printf(" dst: x: %d, y: %d, w: %d, h: %d\n", win->dst_x, win->dst_y, win->dst_w, win->dst_h); printf(" bpp: %d, color_mode: %d, swap: %d\n", win->bits_per_pixel, win->color_mode, win->swap); #endif if (win->is_yuv) bpp = win->is_yuv_planar ? 1 : 2; else bpp = (win->bits_per_pixel + 7) / 8; if (!win->transpose_xy) { h_size = win->src_w * bpp; v_size = win->src_h; } else { h_size = win->src_h * bpp; v_size = win->src_w; } h_offset = win->src_x * bpp; v_offset = win->src_y; if (win->flip_x) { h_offset += win->src_w * bpp - 1; } if (win->flip_y) v_offset += win->src_h - 1; /* Adjust offsets for planar yuv modes */ if (win->is_yuv_planar) { h_offset &= ~1; if (win->flip_x ) h_offset |= 1; v_offset &= ~1; if (win->flip_y ) v_offset |= 1; } /* Setup scaling. 
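dc_setup_clk() above derives the shift-clock divider as *div = (freq * 2 / pclk) - 2; inverting that gives pclk = 2 * freq / (div + 2), which is my reading of the register encoding, not something the patch states. A quick check with illustrative frequencies:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t parent = 297000000ULL;	/* parent PLL rate, illustrative */
	uint64_t pclk = 148500000ULL;	/* 1080p pixel clock */
	uint32_t div = (uint32_t)(parent * 2 / pclk) - 2;

	printf("div = %u -> pclk = %llu Hz\n", div,
	    (unsigned long long)(2 * parent / (div + 2)));	/* 148500000 */
	return (0);
}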
*/ if (!win->transpose_xy) { h_init_dda = dc_scaling_init(win->src_x); v_init_dda = dc_scaling_init(win->src_y); h_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 4); v_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 15); } else { h_init_dda = dc_scaling_init(win->src_y); v_init_dda = dc_scaling_init(win->src_x); h_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 4); v_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 15); } #ifdef DMR_DEBUG_WINDOW printf("\n"); printf(" bpp: %d, size: h: %d v: %d, offset: h:%d v: %d\n", bpp, h_size, v_size, h_offset, v_offset); printf(" init_dda: h: %d v: %d, incr_dda: h: %d v: %d\n", h_init_dda, v_init_dda, h_incr_dda, v_incr_dda); #endif LOCK(sc); /* Select target window */ val = WINDOW_A_SELECT << index; WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, val); /* Sizes */ WR4(sc, DC_WIN_POSITION, WIN_POSITION(win->dst_x, win->dst_y)); WR4(sc, DC_WIN_SIZE, WIN_SIZE(win->dst_w, win->dst_h)); WR4(sc, DC_WIN_PRESCALED_SIZE, WIN_PRESCALED_SIZE(h_size, v_size)); /* DDA */ WR4(sc, DC_WIN_DDA_INCREMENT, WIN_DDA_INCREMENT(h_incr_dda, v_incr_dda)); WR4(sc, DC_WIN_H_INITIAL_DDA, h_init_dda); WR4(sc, DC_WIN_V_INITIAL_DDA, v_init_dda); /* Color planes base addresses and strides */ WR4(sc, DC_WINBUF_START_ADDR, win->base[0]); if (win->is_yuv_planar) { WR4(sc, DC_WINBUF_START_ADDR_U, win->base[1]); WR4(sc, DC_WINBUF_START_ADDR_V, win->base[2]); WR4(sc, DC_WIN_LINE_STRIDE, win->stride[1] << 16 | win->stride[0]); } else { WR4(sc, DC_WIN_LINE_STRIDE, win->stride[0]); } /* Offsets for rotation and axis flip */ WR4(sc, DC_WINBUF_ADDR_H_OFFSET, h_offset); WR4(sc, DC_WINBUF_ADDR_V_OFFSET, v_offset); /* Color format */ WR4(sc, DC_WIN_COLOR_DEPTH, win->color_mode); WR4(sc, DC_WIN_BYTE_SWAP, win->swap); /* Tiling */ val = win->surface_kind; if (win->surface_kind == SURFACE_KIND_BL_16B2) val |= SURFACE_KIND_BLOCK_HEIGHT(win->block_height); WR4(sc, DC_WINBUF_SURFACE_KIND, val); /* Color space coefs for YUV modes */ if (win->is_yuv) { WR4(sc, DC_WINC_CSC_YOF, 0x00f0); WR4(sc, DC_WINC_CSC_KYRGB, 0x012a); WR4(sc, DC_WINC_CSC_KUR, 0x0000); WR4(sc, DC_WINC_CSC_KVR, 0x0198); WR4(sc, DC_WINC_CSC_KUG, 0x039b); WR4(sc, DC_WINC_CSC_KVG, 0x032f); WR4(sc, DC_WINC_CSC_KUB, 0x0204); WR4(sc, DC_WINC_CSC_KVB, 0x0000); } val = WIN_ENABLE; if (win->is_yuv) val |= CSC_ENABLE; else if (win->bits_per_pixel < 24) val |= COLOR_EXPAND; if (win->flip_y) val |= V_DIRECTION; if (win->flip_x) val |= H_DIRECTION; if (win->transpose_xy) val |= SCAN_COLUMN; WR4(sc, DC_WINC_WIN_OPTIONS, val); #ifdef DMR_DEBUG_WINDOW /* Set underflow debug mode -> highlight missing pixels. */ WR4(sc, DC_WINBUF_UFLOW_CTRL, UFLOW_CTR_ENABLE); WR4(sc, DC_WINBUF_UFLOW_DBG_PIXEL, 0xFFFF0000); #endif UNLOCK(sc); } /* ------------------------------------------------------------------- * * Plane functions. 
* */ static int dc_plane_update(struct drm_plane *drm_plane, struct drm_crtc *drm_crtc, struct drm_framebuffer *drm_fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct tegra_plane *plane; struct tegra_crtc *crtc; struct tegra_fb *fb; struct dc_softc *sc; struct dc_window win; int rv; plane = container_of(drm_plane, struct tegra_plane, drm_plane); fb = container_of(drm_fb, struct tegra_fb, drm_fb); crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); memset(&win, 0, sizeof(win)); win.src_x = src_x >> 16; win.src_y = src_y >> 16; win.src_w = src_w >> 16; win.src_h = src_h >> 16; win.dst_x = crtc_x; win.dst_y = crtc_y; win.dst_w = crtc_w; win.dst_h = crtc_h; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", fb->drm_fb.pixel_format); return (rv); } dc_setup_window(sc, plane->index, &win); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << plane->index); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << plane->index); return (0); } static int dc_plane_disable(struct drm_plane *drm_plane) { struct tegra_plane *plane; struct tegra_crtc *crtc; struct dc_softc *sc; uint32_t val, idx; if (drm_plane->crtc == NULL) return (0); plane = container_of(drm_plane, struct tegra_plane, drm_plane); crtc = container_of(drm_plane->crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); idx = plane->index; LOCK(sc); WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT << idx); val = RD4(sc, DC_WINC_WIN_OPTIONS); val &= ~WIN_ENABLE; WR4(sc, DC_WINC_WIN_OPTIONS, val); UNLOCK(sc); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << idx); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << idx); return (0); } static void dc_plane_destroy(struct drm_plane *plane) { dc_plane_disable(plane); drm_plane_cleanup(plane); free(plane, DRM_MEM_KMS); } static const struct drm_plane_funcs dc_plane_funcs = { .update_plane = dc_plane_update, .disable_plane = dc_plane_disable, .destroy = dc_plane_destroy, }; /* ------------------------------------------------------------------- * * CRTC helper functions. 
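dc_plane_update() above shifts src_x/src_y/src_w/src_h right by 16 because DRM hands plane source rectangles to update_plane in 16.16 fixed point, while destination coordinates stay integer. For instance:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t src_w = 1280u << 16;	/* 1280.0 pixels in 16.16 format */

	printf("%u pixels\n", src_w >> 16);	/* 1280 */
	return (0);
}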
* */ static void dc_crtc_dpms(struct drm_crtc *crtc, int mode) { /* Empty function */ } static bool dc_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted) { return (true); } static int dc_set_base(struct dc_softc *sc, int x, int y, struct tegra_fb *fb) { struct dc_window win; int rv; memset(&win, 0, sizeof(win)); win.src_x = x; win.src_y = y; win.src_w = fb->drm_fb.width; win.src_h = fb->drm_fb.height; win.dst_x = x; win.dst_y = y; win.dst_w = fb->drm_fb.width; win.dst_h = fb->drm_fb.height; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", fb->drm_fb.pixel_format); return (rv); } dc_setup_window(sc, 0, &win); return (0); } static int dc_crtc_mode_set(struct drm_crtc *drm_crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted, int x, int y, struct drm_framebuffer *old_fb) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; struct dc_window win; uint32_t div, h_ref_to_sync, v_ref_to_sync; int rv; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); h_ref_to_sync = 1; v_ref_to_sync = 1; /* Setup timing */ rv = dc_setup_clk(sc, drm_crtc, mode, &div); if (rv != 0) { device_printf(sc->dev, "Cannot set pixel clock\n"); return (rv); } /* Timing */ WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, 0); WR4(sc, DC_DISP_REF_TO_SYNC, (v_ref_to_sync << 16) | h_ref_to_sync); WR4(sc, DC_DISP_SYNC_WIDTH, ((mode->vsync_end - mode->vsync_start) << 16) | ((mode->hsync_end - mode->hsync_start) << 0)); WR4(sc, DC_DISP_BACK_PORCH, ((mode->vtotal - mode->vsync_end) << 16) | ((mode->htotal - mode->hsync_end) << 0)); WR4(sc, DC_DISP_FRONT_PORCH, ((mode->vsync_start - mode->vdisplay) << 16) | ((mode->hsync_start - mode->hdisplay) << 0)); WR4(sc, DC_DISP_DISP_ACTIVE, (mode->vdisplay << 16) | mode->hdisplay); WR4(sc, DC_DISP_DISP_INTERFACE_CONTROL, DISP_DATA_FORMAT(DF1P1C)); WR4(sc,DC_DISP_DISP_CLOCK_CONTROL, SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER(PCD1)); memset(&win, 0, sizeof(win)); win.src_x = x; win.src_y = y; win.src_w = mode->hdisplay; win.src_h = mode->vdisplay; win.dst_x = x; win.dst_y = y; win.dst_w = mode->hdisplay; win.dst_h = mode->vdisplay; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", drm_crtc->fb->pixel_format); return (rv); } dc_setup_window(sc, 0, &win); return (0); } static int dc_crtc_mode_set_base(struct drm_crtc *drm_crtc, int x, int y, struct drm_framebuffer *old_fb) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; int rv; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); sc = device_get_softc(crtc->dev); rv = dc_set_base(sc, x, y, fb); /* Commit */ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ); return (rv); } static void dc_crtc_prepare(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL, SYNCPT_CNTRL_NO_STALL); /* XXX allocate syncpoint from host1x */ WR4(sc, DC_CMD_CONT_SYNCPT_VSYNC, SYNCPT_VSYNC_ENABLE | (sc->tegra_crtc.nvidia_head == 0 ? 
SYNCPT_VBLANK0: SYNCPT_VBLANK1)); WR4(sc, DC_CMD_DISPLAY_POWER_CONTROL, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); val = RD4(sc, DC_CMD_DISPLAY_COMMAND); val |= DISPLAY_CTRL_MODE(CTRL_MODE_C_DISPLAY); WR4(sc, DC_CMD_DISPLAY_COMMAND, val); WR4(sc, DC_CMD_INT_MASK, WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_ENABLE, VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); } static void dc_crtc_commit(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); val = RD4(sc, DC_CMD_INT_MASK); val |= FRAME_END_INT; WR4(sc, DC_CMD_INT_MASK, val); val = RD4(sc, DC_CMD_INT_ENABLE); val |= FRAME_END_INT; WR4(sc, DC_CMD_INT_ENABLE, val); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ); } static void dc_crtc_load_lut(struct drm_crtc *crtc) { /* empty function */ } static const struct drm_crtc_helper_funcs dc_crtc_helper_funcs = { .dpms = dc_crtc_dpms, .mode_fixup = dc_crtc_mode_fixup, .mode_set = dc_crtc_mode_set, .mode_set_base = dc_crtc_mode_set_base, .prepare = dc_crtc_prepare, .commit = dc_crtc_commit, .load_lut = dc_crtc_load_lut, }; static int drm_crtc_index(struct drm_crtc *crtc) { int idx; struct drm_crtc *tmp; idx = 0; list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { if (tmp == crtc) return (idx); idx++; } panic("Cannot find CRTC"); } /* ------------------------------------------------------------------- * * Exported functions (mainly vsync related). * * XXX revisit this -> convert to bus methods? 
*/ int tegra_dc_get_pipe(struct drm_crtc *drm_crtc) { struct tegra_crtc *crtc; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); return (crtc->nvidia_head); } void tegra_dc_enable_vblank(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); LOCK(sc); val = RD4(sc, DC_CMD_INT_MASK); val |= VBLANK_INT; WR4(sc, DC_CMD_INT_MASK, val); UNLOCK(sc); } void tegra_dc_disable_vblank(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); LOCK(sc); val = RD4(sc, DC_CMD_INT_MASK); val &= ~VBLANK_INT; WR4(sc, DC_CMD_INT_MASK, val); UNLOCK(sc); } static void dc_finish_page_flip(struct dc_softc *sc) { struct drm_crtc *drm_crtc; struct drm_device *drm; struct tegra_fb *fb; struct tegra_bo *bo; uint32_t base; int idx; drm_crtc = &sc->tegra_crtc.drm_crtc; drm = drm_crtc->dev; fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); mtx_lock(&drm->event_lock); if (sc->event == NULL) { mtx_unlock(&drm->event_lock); return; } LOCK(sc); /* Read active copy of WINBUF_START_ADDR */ WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT); WR4(sc, DC_CMD_STATE_ACCESS, READ_MUX); base = RD4(sc, DC_WINBUF_START_ADDR); WR4(sc, DC_CMD_STATE_ACCESS, 0); UNLOCK(sc); /* Is already active */ bo = tegra_fb_get_plane(fb, 0); if (base == (bo->pbase + fb->drm_fb.offsets[0])) { idx = drm_crtc_index(drm_crtc); drm_send_vblank_event(drm, idx, sc->event); drm_vblank_put(drm, idx); sc->event = NULL; } mtx_unlock(&drm->event_lock); } void tegra_dc_cancel_page_flip(struct drm_crtc *drm_crtc, struct drm_file *file) { struct dc_softc *sc; struct tegra_crtc *crtc; struct drm_device *drm; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); drm = drm_crtc->dev; mtx_lock(&drm->event_lock); if ((sc->event != NULL) && (sc->event->base.file_priv == file)) { sc->event->base.destroy(&sc->event->base); drm_vblank_put(drm, drm_crtc_index(drm_crtc)); sc->event = NULL; } mtx_unlock(&drm->event_lock); } /* ------------------------------------------------------------------- * * CRTC functions. 
* */ static int dc_page_flip(struct drm_crtc *drm_crtc, struct drm_framebuffer *drm_fb, struct drm_pending_vblank_event *event) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; struct drm_device *drm; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); drm = drm_crtc->dev; if (sc->event != NULL) return (-EBUSY); if (event != NULL) { event->pipe = sc->tegra_crtc.nvidia_head; sc->event = event; drm_vblank_get(drm, event->pipe); } dc_set_base(sc, drm_crtc->x, drm_crtc->y, fb); drm_crtc->fb = drm_fb; /* Commit */ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); return (0); } static int dc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct dc_softc *sc; struct tegra_crtc *crtc; struct drm_gem_object *gem; struct tegra_bo *bo; int i; uint32_t val, *src, *dst; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); if (width != height) return (-EINVAL); switch (width) { case 32: val = CURSOR_SIZE(C32x32); break; case 64: val = CURSOR_SIZE(C64x64); break; case 128: val = CURSOR_SIZE(C128x128); break; case 256: val = CURSOR_SIZE(C256x256); break; default: return (-EINVAL); } bo = NULL; gem = NULL; if (handle != 0) { gem = drm_gem_object_lookup(drm_crtc->dev, file, handle); if (gem == NULL) return (-ENOENT); bo = container_of(gem, struct tegra_bo, gem_obj); } if (sc->cursor_gem != NULL) { drm_gem_object_unreference(sc->cursor_gem); } sc->cursor_gem = gem; if (bo != NULL) { /* * Copy cursor into cache and convert it from ARGB to RGBA. * XXXX - this is broken by design - client can write to BO at * any time. We can dedicate other window for cursor or switch * to sw cursor in worst case. */ src = (uint32_t *)bo->vbase; dst = (uint32_t *)crtc->cursor_vbase; for (i = 0; i < width * height; i++) dst[i] = (src[i] << 8) | (src[i] >> 24); val |= CURSOR_CLIP(CC_DISPLAY); val |= CURSOR_START_ADDR(crtc->cursor_pbase); WR4(sc, DC_DISP_CURSOR_START_ADDR, val); val = RD4(sc, DC_DISP_BLEND_CURSOR_CONTROL); val &= ~CURSOR_DST_BLEND_FACTOR_SELECT(~0); val &= ~CURSOR_SRC_BLEND_FACTOR_SELECT(~0); val |= CURSOR_MODE_SELECT; val |= CURSOR_DST_BLEND_FACTOR_SELECT(DST_NEG_K1_TIMES_SRC); val |= CURSOR_SRC_BLEND_FACTOR_SELECT(SRC_BLEND_K1_TIMES_SRC); val |= CURSOR_ALPHA(~0); WR4(sc, DC_DISP_BLEND_CURSOR_CONTROL, val); val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); val |= CURSOR_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } else { val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); val &= ~CURSOR_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } /* XXX This fixes cursor underflow issues, but why ? 
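The cursor copy in dc_cursor_set() above converts ARGB to RGBA by rotating each 32-bit pixel left by 8 bits, dst = (src << 8) | (src >> 24). Stand-alone check:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t argb = 0xFF112233;	/* A=FF R=11 G=22 B=33 */
	uint32_t rgba = (argb << 8) | (argb >> 24);

	printf("0x%08X\n", rgba);	/* 0x112233FF */
	return (0);
}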
*/ WR4(sc, DC_DISP_CURSOR_UNDERFLOW_CTRL, CURSOR_UFLOW_CYA); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | CURSOR_UPDATE ); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | CURSOR_ACT_REQ); return (0); } static int dc_cursor_move(struct drm_crtc *drm_crtc, int x, int y) { struct dc_softc *sc; struct tegra_crtc *crtc; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_DISP_CURSOR_POSITION, CURSOR_POSITION(x, y)); WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_ACT_REQ); return (0); } static void dc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); memset(crtc, 0, sizeof(*crtc)); } static const struct drm_crtc_funcs dc_crtc_funcs = { .page_flip = dc_page_flip, .cursor_set = dc_cursor_set, .cursor_move = dc_cursor_move, .set_config = drm_crtc_helper_set_config, .destroy = dc_destroy, }; /* ------------------------------------------------------------------- * * Bus and infrastructure. * */ static int dc_init_planes(struct dc_softc *sc, struct tegra_drm *drm) { int i, rv; struct tegra_plane *plane; rv = 0; for (i = 0; i < DC_MAX_PLANES; i++) { plane = malloc(sizeof(*plane), DRM_MEM_KMS, M_WAITOK | M_ZERO); plane->index = i + 1; rv = drm_plane_init(&drm->drm_dev, &plane->drm_plane, 1 << sc->tegra_crtc.nvidia_head, &dc_plane_funcs, dc_plane_formats, nitems(dc_plane_formats), false); if (rv != 0) { free(plane, DRM_MEM_KMS); return (rv); } } return 0; } static void dc_display_enable(device_t dev, bool enable) { struct dc_softc *sc; uint32_t val; sc = device_get_softc(dev); /* Set display mode */ val = enable ? CTRL_MODE_C_DISPLAY: CTRL_MODE_STOP; WR4(sc, DC_CMD_DISPLAY_COMMAND, DISPLAY_CTRL_MODE(val)); /* and commit it*/ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ); } static void dc_hdmi_enable(device_t dev, bool enable) { struct dc_softc *sc; uint32_t val; sc = device_get_softc(dev); val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); if (enable) val |= HDMI_ENABLE; else val &= ~HDMI_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } static void dc_setup_timing(device_t dev, int h_pulse_start) { struct dc_softc *sc; sc = device_get_softc(dev); /* Setup display timing */ WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, VSYNC_H_POSITION(1)); WR4(sc, DC_DISP_DISP_COLOR_CONTROL, DITHER_CONTROL(DITHER_DISABLE) | BASE_COLOR_SIZE(SIZE_BASE888)); WR4(sc, DC_DISP_DISP_SIGNAL_OPTIONS0, H_PULSE2_ENABLE); WR4(sc, DC_DISP_H_PULSE2_CONTROL, PULSE_CONTROL_QUAL(QUAL_VACTIVE) | PULSE_CONTROL_LAST(LAST_END_A)); WR4(sc, DC_DISP_H_PULSE2_POSITION_A, PULSE_START(h_pulse_start) | PULSE_END(h_pulse_start + 8)); } static void dc_intr(void *arg) { struct dc_softc *sc; uint32_t status; sc = arg; /* Confirm interrupt */ status = RD4(sc, DC_CMD_INT_STATUS); WR4(sc, DC_CMD_INT_STATUS, status); if (status & VBLANK_INT) { drm_handle_vblank(sc->tegra_crtc.drm_crtc.dev, sc->tegra_crtc.nvidia_head); dc_finish_page_flip(sc); } } static int dc_init_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct dc_softc *sc; int rv; sc = device_get_softc(dev); if (drm->pitch_align < sc->pitch_align) drm->pitch_align = sc->pitch_align; drm_crtc_init(&drm->drm_dev, &sc->tegra_crtc.drm_crtc, &dc_crtc_funcs); drm_mode_crtc_set_gamma_size(&sc->tegra_crtc.drm_crtc, 256); drm_crtc_helper_add(&sc->tegra_crtc.drm_crtc, &dc_crtc_helper_funcs); rv = dc_init_planes(sc, drm); if (rv!= 0){ device_printf(dev, "Cannot init planes\n"); return (rv); } WR4(sc, DC_CMD_INT_TYPE, WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | 
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_POLARITY, WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_ENABLE, 0); WR4(sc, DC_CMD_INT_MASK, 0); rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, dc_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "Cannot register interrupt handler\n"); return (rv); } /* allocate memory for cursor cache */ sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4, M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING); sc->tegra_crtc.cursor_pbase = vtophys((uintptr_t)sc->tegra_crtc.cursor_vbase); return (0); } static int dc_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct dc_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sc->irq_ih = NULL; return (0); } static int get_fdt_resources(struct dc_softc *sc, phandle_t node) { int rv; rv = hwreset_get_by_ofw_name(sc->dev, 0, "dc", &sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' reset\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "dc", &sc->clk_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' clock\n"); return (rv); } rv = OF_getencprop(node, "nvidia,head", &sc->tegra_crtc.nvidia_head, sizeof(sc->tegra_crtc.nvidia_head)); if (rv <= 0) { device_printf(sc->dev, "Cannot get 'nvidia,head' property\n"); return (rv); } return (0); } static int enable_fdt_resources(struct dc_softc *sc) { int id, rv; rv = clk_set_parent_by_clk(sc->clk_dc, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'dc' clock\n"); return (rv); } id = (sc->tegra_crtc.nvidia_head == 0) ? 
TEGRA_POWERGATE_DIS: TEGRA_POWERGATE_DISB; rv = tegra_powergate_sequence_power_up(id, sc->clk_dc, sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'DIS' powergate\n"); return (rv); } return (0); } static int dc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra Display Controller"); return (BUS_PROBE_DEFAULT); } static int dc_attach(device_t dev) { struct dc_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->tegra_crtc.dev = dev; node = ofw_bus_get_node(sc->dev); LOCK_INIT(sc); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } /* * Tegra124 * - 64 for RGB modes * - 128 for YUV planar modes * - 256 for block linear modes */ sc->pitch_align = 256; rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (ENXIO); } static int dc_detach(device_t dev) { struct dc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, dc_init_client), DEVMETHOD(tegra_drm_exit_client, dc_exit_client), /* tegra dc interface */ DEVMETHOD(tegra_dc_display_enable, dc_display_enable), DEVMETHOD(tegra_dc_hdmi_enable, dc_hdmi_enable), DEVMETHOD(tegra_dc_setup_timing, dc_setup_timing), DEVMETHOD_END }; DEFINE_CLASS_0(tegra_dc, tegra_dc_driver, tegra_dc_methods, sizeof(struct dc_softc)); DRIVER_MODULE(tegra_dc, host1x, tegra_dc_driver, NULL, NULL); diff --git a/sys/arm/nvidia/drm2/tegra_hdmi.c b/sys/arm/nvidia/drm2/tegra_hdmi.c index e5ce30ac2eb3..067df21c889c 100644 
--- a/sys/arm/nvidia/drm2/tegra_hdmi.c +++ b/sys/arm/nvidia/drm2/tegra_hdmi.c @@ -1,1315 +1,1321 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra_dc_if.h" #include "tegra_drm_if.h" #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, 4 * (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, 4 * (_r)) /* HDA stream format verb. 
*/ #define AC_FMT_CHAN_GET(x) (((x) >> 0) & 0xf) #define AC_FMT_CHAN_BITS_GET(x) (((x) >> 4) & 0x7) #define AC_FMT_DIV_GET(x) (((x) >> 8) & 0x7) #define AC_FMT_MUL_GET(x) (((x) >> 11) & 0x7) #define AC_FMT_BASE_44K (1 << 14) #define AC_FMT_TYPE_NON_PCM (1 << 15) #define HDMI_REKEY_DEFAULT 56 #define HDMI_ELD_BUFFER_SIZE 96 #define HDMI_DC_CLOCK_MULTIPIER 2 struct audio_reg { uint32_t audio_clk; bus_size_t acr_reg; bus_size_t nval_reg; bus_size_t aval_reg; }; static const struct audio_reg audio_regs[] = { { .audio_clk = 32000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0320, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320, }, { .audio_clk = 44100, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0441, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441, }, { .audio_clk = 88200, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0882, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882, }, { .audio_clk = 176400, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1764, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764, }, { .audio_clk = 48000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0480, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480, }, { .audio_clk = 96000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0960, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960, }, { .audio_clk = 192000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1920, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920, }, }; struct tmds_config { uint32_t pclk; uint32_t pll0; uint32_t pll1; uint32_t drive_c; uint32_t pe_c; uint32_t peak_c; uint32_t pad_ctls; }; static const struct tmds_config tegra124_tmds_config[] = { { /* 480p/576p / 25.2MHz/27MHz */ .pclk = 27000000, .pll0 = 0x01003010, .pll1 = 0x00301B00, .drive_c = 0x1F1F1F1F, .pe_c = 0x00000000, .peak_c = 0x03030303, .pad_ctls = 0x800034BB, }, { /* 720p/1080i / 74.25MHz */ .pclk = 74250000, .pll0 = 0x01003110, .pll1 = 0x00301500, .drive_c = 0x2C2C2C2C, .pe_c = 0x00000000, .peak_c = 0x07070707, .pad_ctls = 0x800034BB, }, { /* 1080p / 148.5MHz */ .pclk = 148500000, .pll0 = 0x01003310, .pll1 = 0x00301500, .drive_c = 0x33333333, .pe_c = 0x00000000, .peak_c = 0x0C0C0C0C, .pad_ctls = 0x800034BB, }, { /* 2216p / 297MHz */ .pclk = UINT_MAX, .pll0 = 0x01003F10, .pll1 = 0x00300F00, .drive_c = 0x37373737, .pe_c = 0x00000000, .peak_c = 0x17171717, .pad_ctls = 0x800036BB, }, }; struct hdmi_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; clk_t clk_parent; clk_t clk_hdmi; hwreset_t hwreset_hdmi; regulator_t supply_hdmi; regulator_t supply_pll; regulator_t supply_vdd; uint64_t pclk; boolean_t hdmi_mode; int audio_src_type; int audio_freq; int audio_chans; struct tegra_drm *drm; struct tegra_drm_encoder output; const struct tmds_config *tmds_config; int n_tmds_configs; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-hdmi", 1}, {NULL, 0}, }; /* These functions have been copied from newer version of drm_edid.c */ /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 #define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
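The AC_FMT_* macros above pick apart an HDA stream-format verb; the field meanings assumed here (channel count minus one in bits 3:0, bits-per-sample code in bits 6:4) follow the HDA specification, and the sample value is illustrative:

#include <stdio.h>

#define AC_FMT_CHAN_GET(x)	(((x) >> 0) & 0xf)
#define AC_FMT_CHAN_BITS_GET(x)	(((x) >> 4) & 0x7)

int
main(void)
{
	unsigned fmt = 0x0011;	/* 2 channels, 16-bit, per HDA encoding */

	printf("channels = %u, bits code = %u\n",
	    AC_FMT_CHAN_GET(fmt) + 1, AC_FMT_CHAN_BITS_GET(fmt));
	return (0);
}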
*/ static int drm_eld_size(const uint8_t *eld) { return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } static int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, struct drm_display_mode *mode) { int rv; if (!frame || !mode) return -EINVAL; rv = hdmi_avi_infoframe_init(frame); if (rv < 0) return rv; if (mode->flags & DRM_MODE_FLAG_DBLCLK) frame->pixel_repeat = 1; frame->video_code = drm_match_cea_mode(mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; #ifdef FREEBSD_NOTYET /* * Populate picture aspect ratio from either * user input (if specified) or from the CEA mode list. */ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) frame->picture_aspect = mode->picture_aspect_ratio; else if (frame->video_code > 0) frame->picture_aspect = drm_get_cea_aspect_ratio( frame->video_code); #endif frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; return 0; } /* --------------------------------------------------------------------- */ static int hdmi_setup_clock(struct tegra_drm_encoder *output, clk_t clk, uint64_t pclk) { struct hdmi_softc *sc; uint64_t freq; int rv; sc = device_get_softc(output->dev); /* Disable consumers clock for while. */ rv = clk_disable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot disable 'hdmi' clock\n"); return (rv); } rv = clk_disable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot disable display clock\n"); return (rv); } /* Set frequency for Display Controller PLL. */ freq = HDMI_DC_CLOCK_MULTIPIER * pclk; rv = clk_set_freq(sc->clk_parent, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display pixel frequency\n"); return (rv); } /* Reparent display controller */ rv = clk_set_parent_by_clk(clk, sc->clk_parent); if (rv != 0) { device_printf(output->dev, "Cannot set parent clock\n"); return (rv); } rv = clk_set_freq(clk, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, pclk, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } /* And reenable consumers clock. */ rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable display clock\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = clk_get_freq(clk, &freq); if (rv != 0) { device_printf(output->dev, "Cannot get display controller frequency\n"); return (rv); } DRM_DEBUG_KMS("DC frequency: %llu\n", freq); return (0); } /* ------------------------------------------------------------------- * * Infoframes. 
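drm_eld_size() above is a 4-byte header plus the baseline block, whose length lives in eld[2] and is counted in 32-bit dwords, hence the "* 4". For example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t eld[96] = { 0 };

	eld[2] = 20;				/* baseline block: 20 dwords */
	printf("%d bytes\n", 4 + eld[2] * 4);	/* 84 */
	return (0);
}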
* */ static void avi_setup_infoframe(struct hdmi_softc *sc, struct drm_display_mode *mode) { struct hdmi_avi_infoframe frame; uint8_t buf[17], *hdr, *pb; ssize_t rv; rv = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (rv < 0) { device_printf(sc->dev, "Cannot setup AVI infoframe: %zd\n", rv); return; } rv = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack AVI infoframe: %zd\n", rv); return; } hdr = buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, (pb[6] << 16) | (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, (pb[10] << 24) |(pb[9] << 16) | (pb[8] << 8) | (pb[7] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, (pb[13] << 16) | (pb[12] << 8) | (pb[11] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, AVI_INFOFRAME_CTRL_ENABLE); } static void audio_setup_infoframe(struct hdmi_softc *sc) { struct hdmi_audio_infoframe frame; uint8_t buf[14], *hdr, *pb; ssize_t rv; rv = hdmi_audio_infoframe_init(&frame); frame.channels = sc->audio_chans; rv = hdmi_audio_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack audio infoframe\n"); return; } hdr = buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH, (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, AUDIO_INFOFRAME_CTRL_ENABLE); } /* ------------------------------------------------------------------- * * Audio * */ static void init_hda_eld(struct hdmi_softc *sc) { size_t size; int i ; uint32_t val; size = drm_eld_size(sc->output.connector.eld); for (i = 0; i < HDMI_ELD_BUFFER_SIZE; i++) { val = i << 8; if (i < size) val |= sc->output.connector.eld[i]; WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR, val); } WR4(sc,HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE, SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT); } static int get_audio_regs(int freq, bus_size_t *acr_reg, bus_size_t *nval_reg, bus_size_t *aval_reg) { int i; const struct audio_reg *reg; for (i = 0; i < nitems(audio_regs) ; i++) { reg = audio_regs + i; if (reg->audio_clk == freq) { if (acr_reg != NULL) *acr_reg = reg->acr_reg; if (nval_reg != NULL) *nval_reg = reg->nval_reg; if (aval_reg != NULL) *aval_reg = reg->aval_reg; return (0); } } return (ERANGE); } #define FR_BITS 16 #define TO_FFP(x) (((int64_t)(x)) << FR_BITS) #define TO_INT(x) ((int)((x) >> FR_BITS)) static int get_hda_cts_n(uint32_t audio_freq_hz, uint32_t pixclk_freq_hz, uint32_t *best_cts, uint32_t *best_n, uint32_t *best_a) { int min_n; int max_n; int ideal_n; int n; int cts; int aval; int64_t err_f; int64_t min_err_f; int64_t cts_f; int64_t aval_f; int64_t half_f; /* constant 0.5 */ bool better_n; /* * All floats are in fixed I48.16 format. * * Ideal ACR interval is 1000 hz (1 ms); * acceptable is 300 hz .. 
1500 hz */ min_n = 128 * audio_freq_hz / 1500; max_n = 128 * audio_freq_hz / 300; ideal_n = 128 * audio_freq_hz / 1000; min_err_f = TO_FFP(100); half_f = TO_FFP(1) / 2; *best_n = 0; *best_cts = 0; *best_a = 0; for (n = min_n; n <= max_n; n++) { cts_f = TO_FFP(pixclk_freq_hz); cts_f *= n; cts_f /= 128 * audio_freq_hz; cts = TO_INT(cts_f + half_f); /* round */ err_f = cts_f - TO_FFP(cts); if (err_f < 0) err_f = -err_f; aval_f = TO_FFP(24000000); aval_f *= n; aval_f /= 128 * audio_freq_hz; aval = TO_INT(aval_f); /* truncate */ better_n = abs(n - ideal_n) < abs((int)(*best_n) - ideal_n); if (TO_FFP(aval) == aval_f && (err_f < min_err_f || (err_f == min_err_f && better_n))) { min_err_f = err_f; *best_n = (uint32_t)n; *best_cts = (uint32_t)cts; *best_a = (uint32_t)aval; if (err_f == 0 && n == ideal_n) break; } } return (0); } #undef FR_BITS #undef TO_FFP #undef TO_INT static int audio_setup(struct hdmi_softc *sc) { uint32_t val; uint32_t audio_n; uint32_t audio_cts; uint32_t audio_aval; uint64_t hdmi_freq; bus_size_t aval_reg; int rv; if (!sc->hdmi_mode) return (ENOTSUP); rv = get_audio_regs(sc->audio_freq, NULL, NULL, &aval_reg); if (rv != 0) { device_printf(sc->dev, "Unsupported audio frequency.\n"); return (rv); } rv = clk_get_freq(sc->clk_hdmi, &hdmi_freq); if (rv != 0) { device_printf(sc->dev, "Cannot get hdmi frequency: %d\n", rv); return (rv); } rv = get_hda_cts_n(sc->audio_freq, hdmi_freq, &audio_cts, &audio_n, &audio_aval); if (rv != 0) { device_printf(sc->dev, "Cannot compute audio coefs: %d\n", rv); return (rv); } /* Audio infoframe. */ audio_setup_infoframe(sc); /* Setup audio source */ WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0, SOR_AUDIO_CNTRL0_SOURCE_SELECT(sc->audio_src_type) | SOR_AUDIO_CNTRL0_INJECT_NULLSMPL); val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0); val |= SOR_AUDIO_SPARE0_HBR_ENABLE; WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0, val); WR4(sc, HDMI_NV_PDISP_HDMI_ACR_CTRL, 0); WR4(sc, HDMI_NV_PDISP_AUDIO_N, AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE | AUDIO_N_VALUE(audio_n - 1)); WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH, ACR_SUBPACK_N(audio_n) | ACR_ENABLE); WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW, ACR_SUBPACK_CTS(audio_cts)); WR4(sc, HDMI_NV_PDISP_HDMI_SPARE, SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1)); val = RD4(sc, HDMI_NV_PDISP_AUDIO_N); val &= ~AUDIO_N_RESETF; WR4(sc, HDMI_NV_PDISP_AUDIO_N, val); WR4(sc, aval_reg, audio_aval); return (0); } static void audio_disable(struct hdmi_softc *sc) { uint32_t val; /* Disable audio */ val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); val &= ~GENERIC_CTRL_AUDIO; WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val); /* Disable audio infoframes */ val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); val &= ~AUDIO_INFOFRAME_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val); } static void audio_enable(struct hdmi_softc *sc) { uint32_t val; if (!sc->hdmi_mode) audio_disable(sc); /* Enable audio infoframes */ val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); val |= AUDIO_INFOFRAME_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val); /* Enable audio */ val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); val |= GENERIC_CTRL_AUDIO; WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val); } /* ------------------------------------------------------------------- * * HDMI. 
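get_hda_cts_n() above searches for an (N, CTS) pair satisfying the HDMI audio clock regeneration relation 128 * fs = pclk * N / CTS, preferring N near 128 * fs / 1000 (the ~1 kHz ACR interval the ideal_n term encodes). A spot check with illustrative rates:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t fs = 48000, pclk = 148500000;
	uint32_t n = 128 * fs / 1000;			/* ideal N = 6144 */
	uint64_t cts = (uint64_t)pclk * n / (128 * fs);	/* 148500 */

	printf("N = %u, CTS = %llu\n", n, (unsigned long long)cts);
	return (0);
}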
* */ /* Process format change notification from HDA */ static void hda_intr(struct hdmi_softc *sc) { uint32_t val; int rv; if (!sc->hdmi_mode) return; val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0); if ((val & (1 << 30)) == 0) { audio_disable(sc); return; } /* XXX Move this to any header */ /* Keep in sync with HDA */ sc->audio_freq = val & 0x00FFFFFF; sc->audio_chans = (val >> 24) & 0x0f; DRM_DEBUG_KMS("%d channel(s) at %dHz\n", sc->audio_chans, sc->audio_freq); rv = audio_setup(sc); if (rv != 0) { audio_disable(sc); return; } audio_enable(sc); } static void tmds_init(struct hdmi_softc *sc, const struct tmds_config *tmds) { WR4(sc, HDMI_NV_PDISP_SOR_PLL0, tmds->pll0); WR4(sc, HDMI_NV_PDISP_SOR_PLL1, tmds->pll1); WR4(sc, HDMI_NV_PDISP_PE_CURRENT, tmds->pe_c); WR4(sc, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, tmds->drive_c); WR4(sc, HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT, tmds->peak_c); WR4(sc, HDMI_NV_PDISP_SOR_PAD_CTLS0, tmds->pad_ctls); } static int hdmi_sor_start(struct hdmi_softc *sc, struct drm_display_mode *mode) { int i; uint32_t val; /* Enable TMDS macro */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; val &= ~SOR_PLL0_VCOPD; val &= ~SOR_PLL0_PULLDOWN; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); WR4(sc, HDMI_NV_PDISP_SOR_PWR, SOR_PWR_SETTING_NEW); WR4(sc, HDMI_NV_PDISP_SOR_PWR, 0); /* Wait until SOR is ready */ for (i = 1000; i > 0; i--) { val = RD4(sc, HDMI_NV_PDISP_SOR_PWR); if ((val & SOR_PWR_SETTING_NEW) == 0) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timeouted while enabling SOR power.\n"); return (ETIMEDOUT); } val = SOR_STATE2_ASY_OWNER(ASY_OWNER_HEAD0) | SOR_STATE2_ASY_SUBOWNER(SUBOWNER_BOTH) | SOR_STATE2_ASY_CRCMODE(ASY_CRCMODE_COMPLETE) | SOR_STATE2_ASY_PROTOCOL(ASY_PROTOCOL_SINGLE_TMDS_A); if (mode->flags & DRM_MODE_FLAG_NHSYNC) val |= SOR_STATE2_ASY_HSYNCPOL_NEG; if (mode->flags & DRM_MODE_FLAG_NVSYNC) val |= SOR_STATE2_ASY_VSYNCPOL_NEG; WR4(sc, HDMI_NV_PDISP_SOR_STATE2, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE1, SOR_STATE1_ASY_ORMODE_NORMAL | SOR_STATE1_ASY_HEAD_OPMODE(ASY_HEAD_OPMODE_AWAKE)); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, SOR_STATE0_UPDATE); val = RD4(sc, HDMI_NV_PDISP_SOR_STATE1); val |= SOR_STATE1_ATTACHED; WR4(sc, HDMI_NV_PDISP_SOR_STATE1, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); return 0; } static int hdmi_disable(struct hdmi_softc *sc) { struct tegra_crtc *crtc; device_t dc; uint32_t val; dc = NULL; if (sc->output.encoder.crtc != NULL) { crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; } if (dc != NULL) { TEGRA_DC_HDMI_ENABLE(dc, false); TEGRA_DC_DISPLAY_ENABLE(dc, false); } audio_disable(sc); val = RD4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); val &= ~AVI_INFOFRAME_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, val); /* Disable interrupts */ WR4(sc, HDMI_NV_PDISP_INT_ENABLE, 0); WR4(sc, HDMI_NV_PDISP_INT_MASK, 0); return (0); } static int hdmi_enable(struct hdmi_softc *sc) { uint64_t freq; struct drm_display_mode *mode; struct tegra_crtc *crtc; uint32_t val, h_sync_width, h_back_porch, h_front_porch, h_pulse_start; uint32_t h_max_ac_packet, div8_2; device_t dc; int i, rv; mode = &sc->output.encoder.crtc->mode; crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; /* Compute all timings first. 
*/ sc->pclk = mode->clock * 1000; h_sync_width = mode->hsync_end - mode->hsync_start; h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; h_pulse_start = 1 + h_sync_width + h_back_porch - 10; h_max_ac_packet = (h_sync_width + h_back_porch + h_front_porch - HDMI_REKEY_DEFAULT - 18) / 32; /* Check if HDMI device is connected and detected. */ if (sc->output.connector.edid_blob_ptr == NULL) { sc->hdmi_mode = false; } else { sc->hdmi_mode = drm_detect_hdmi_monitor( (struct edid *)sc->output.connector.edid_blob_ptr->data); } /* Get exact HDMI pixel frequency. */ rv = clk_get_freq(sc->clk_hdmi, &freq); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock frequency\n"); return (rv); } DRM_DEBUG_KMS("HDMI frequency: %llu Hz\n", freq); /* Wakeup SOR power */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); /* Setup timings */ TEGRA_DC_SETUP_TIMING(dc, h_pulse_start); WR4(sc, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW, VSYNC_WINDOW_START(0x200) | VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_ENABLE); /* Setup video source and adjust video range */ val = 0; if (crtc->nvidia_head != 0) HDMI_SRC_DISPLAYB; if ((mode->hdisplay != 640) || (mode->vdisplay != 480)) val |= ARM_VIDEO_RANGE_LIMITED; WR4(sc, HDMI_NV_PDISP_INPUT_CONTROL, val); /* Program SOR reference clock - it uses 8.2 fractional divisor */ div8_2 = (freq * 4) / 1000000; val = SOR_REFCLK_DIV_INT(div8_2 >> 2) | SOR_REFCLK_DIV_FRAC(div8_2); WR4(sc, HDMI_NV_PDISP_SOR_REFCLK, val); /* Setup audio */ if (sc->hdmi_mode) { rv = audio_setup(sc); if (rv != 0) sc->hdmi_mode = false; } /* Init HDA ELD */ init_hda_eld(sc); val = HDMI_CTRL_REKEY(HDMI_REKEY_DEFAULT); val |= HDMI_CTRL_MAX_AC_PACKET(h_max_ac_packet); if (sc->hdmi_mode) val |= HDMI_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_CTRL, val); /* Setup TMDS */ for (i = 0; i < sc->n_tmds_configs; i++) { if (sc->pclk <= sc->tmds_config[i].pclk) { tmds_init(sc, sc->tmds_config + i); break; } } /* Program sequencer. */ WR4(sc, HDMI_NV_PDISP_SOR_SEQ_CTL, SOR_SEQ_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8)); val = SOR_SEQ_INST_WAIT_TIME(1) | SOR_SEQ_INST_WAIT_UNITS(WAIT_UNITS_VSYNC) | SOR_SEQ_INST_HALT | SOR_SEQ_INST_DRIVE_PWM_OUT_LO; WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(0), val); WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(8), val); val = RD4(sc,HDMI_NV_PDISP_SOR_CSTM); val &= ~SOR_CSTM_LVDS_ENABLE; val &= ~SOR_CSTM_ROTCLK(~0); val |= SOR_CSTM_ROTCLK(2); val &= ~SOR_CSTM_MODE(~0); val |= SOR_CSTM_MODE(CSTM_MODE_TMDS); val |= SOR_CSTM_PLLDIV; WR4(sc, HDMI_NV_PDISP_SOR_CSTM, val); TEGRA_DC_DISPLAY_ENABLE(dc, false); rv = hdmi_sor_start(sc, mode); if (rv != 0) return (rv); TEGRA_DC_HDMI_ENABLE(dc, true); TEGRA_DC_DISPLAY_ENABLE(dc, true); /* Enable HDA codec interrupt */ WR4(sc, HDMI_NV_PDISP_INT_MASK, INT_CODEC_SCRATCH0); WR4(sc, HDMI_NV_PDISP_INT_ENABLE, INT_CODEC_SCRATCH0); if (sc->hdmi_mode) { avi_setup_infoframe(sc, mode); audio_enable(sc); } return (0); } /* ------------------------------------------------------------------- * * DRM Interface. 
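The SOR reference clock divisor programmed above is an 8.2 fixed-point number: (freq * 4) / 1000000 expresses the divisor in quarter-MHz steps, with the upper bits landing in the INT field and the low two bits in the FRAC field. A worked standalone example, assuming a hypothetical 148.5 MHz HDMI clock:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint64_t freq = 148500000;      /* hypothetical HDMI clock, Hz */
    uint32_t div8_2, ip, fp;

    div8_2 = (uint32_t)((freq * 4) / 1000000);  /* quarter-MHz steps */
    ip = div8_2 >> 2;               /* integer part: 148 */
    fp = div8_2 & 0x3;              /* fractional part: 2 (= 0.50) */
    printf("divisor = %u + %u/4 MHz (raw 0x%03x)\n",
        (unsigned)ip, (unsigned)fp, (unsigned)div8_2);
    return (0);
}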
* */ static enum drm_mode_status hdmi_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; uint64_t freq; output = container_of(connector, struct tegra_drm_encoder, connector); sc = device_get_softc(output->dev); freq = HDMI_DC_CLOCK_MULTIPIER * mode->clock * 1000; rv = clk_test_freq(sc->clk_parent, freq, 0); DRM_DEBUG_KMS("Test HDMI frequency: %u kHz, rv: %d\n", mode->clock, rv); if (rv != 0) return (MODE_NOCLOCK); return (MODE_OK); } static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = tegra_drm_connector_get_modes, .mode_valid = hdmi_connector_mode_valid, .best_encoder = tegra_drm_connector_best_encoder, }; static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = tegra_drm_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, }; static const struct drm_encoder_funcs hdmi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static void hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) { /* Empty function. */ } static bool hdmi_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted) { return (true); } static void hdmi_encoder_prepare(struct drm_encoder *encoder) { /* Empty function. */ } static void hdmi_encoder_commit(struct drm_encoder *encoder) { /* Empty function. */ } static void hdmi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); rv = hdmi_enable(sc); if (rv != 0) device_printf(sc->dev, "Cannot enable HDMI port\n"); } static void hdmi_encoder_disable(struct drm_encoder *encoder) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); if (sc == NULL) return; rv = hdmi_disable(sc); if (rv != 0) device_printf(sc->dev, "Cannot disable HDMI port\n"); } static const struct drm_encoder_helper_funcs hdmi_encoder_helper_funcs = { .dpms = hdmi_encoder_dpms, .mode_fixup = hdmi_encoder_mode_fixup, .prepare = hdmi_encoder_prepare, .commit = hdmi_encoder_commit, .mode_set = hdmi_encoder_mode_set, .disable = hdmi_encoder_disable, }; /* ------------------------------------------------------------------- * * Bus and infrastructure. * */ static int hdmi_init_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; phandle_t node; int rv; sc = device_get_softc(dev); node = ofw_bus_get_node(sc->dev); sc->drm = drm; sc->output.setup_clock = &hdmi_setup_clock; rv = tegra_drm_encoder_attach(&sc->output, node); if (rv != 0) { device_printf(dev, "Cannot attach output connector\n"); return(ENXIO); } /* Connect this encoder + connector to DRM. 
*/ drm_connector_init(&drm->drm_dev, &sc->output.connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(&sc->output.connector, &hdmi_connector_helper_funcs); sc->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(&drm->drm_dev, &sc->output.encoder, &hdmi_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&sc->output.encoder, &hdmi_encoder_helper_funcs); drm_mode_connector_attach_encoder(&sc->output.connector, &sc->output.encoder); rv = tegra_drm_encoder_init(&sc->output, drm); if (rv < 0) { device_printf(sc->dev, "Unable to init HDMI output\n"); return (rv); } sc->output.encoder.possible_crtcs = 0x3; return (0); } static int hdmi_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; sc = device_get_softc(dev); tegra_drm_encoder_exit(&sc->output, drm); return (0); } static int get_fdt_resources(struct hdmi_softc *sc, phandle_t node) { int rv; rv = regulator_get_by_ofw_property(sc->dev, 0, "hdmi-supply", &sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev,0, "pll-supply", &sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vdd-supply", &sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot get 'vdd' regulator\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' reset\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock\n"); return (ENXIO); } return (0); } static int enable_fdt_resources(struct hdmi_softc *sc) { int rv; rv = clk_set_parent_by_clk(sc->clk_hdmi, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'hdmi' clock\n"); return (rv); } /* 594 MHz is arbitrarily selected value */ rv = clk_set_freq(sc->clk_parent, 594000000, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, 594000000 / 4, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = regulator_enable(sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pll' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vdd' regulator\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'hdmi' reset\n"); return (rv); } return (0); } static void hdmi_intr(void *arg) { struct hdmi_softc *sc; uint32_t status; sc = arg; /* Confirm interrupt */ status = RD4(sc, HDMI_NV_PDISP_INT_STATUS); WR4(sc, HDMI_NV_PDISP_INT_STATUS, status); /* process audio verb from HDA */ if (status & INT_CODEC_SCRATCH0) hda_intr(sc); } static int hdmi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data 
== 0) return (ENXIO); device_set_desc(dev, "Tegra HDMI"); return (BUS_PROBE_DEFAULT); } static int hdmi_attach(device_t dev) { struct hdmi_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->output.dev = sc->dev; node = ofw_bus_get_node(sc->dev); sc->audio_src_type = SOURCE_SELECT_AUTO; sc->audio_freq = 44100; sc->audio_chans = 2; sc->hdmi_mode = false; sc->tmds_config = tegra124_tmds_config; sc->n_tmds_configs = nitems(tegra124_tmds_config); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, hdmi_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int hdmi_detach(device_t dev) { struct hdmi_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); + sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_hdmi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hdmi_probe), DEVMETHOD(device_attach, hdmi_attach), DEVMETHOD(device_detach, hdmi_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, hdmi_init_client), DEVMETHOD(tegra_drm_exit_client, hdmi_exit_client), DEVMETHOD_END }; DEFINE_CLASS_0(tegra_hdmi, tegra_hdmi_driver, tegra_hdmi_methods, sizeof(struct hdmi_softc)); DRIVER_MODULE(tegra_hdmi, host1x, tegra_hdmi_driver, 0, 0); diff --git 
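The hdmi_detach() reordering above (and the matching hunks in the files below) encodes a general rule: bus_generic_detach() can fail, and the children it tears down may still be using the parent's interrupt, clocks and regulators, so it has to run and succeed before those resources are released. A minimal userspace model of that ordering; detach_children() and release_resources() are stand-ins for the real bus calls:

#include <stdio.h>

/* Stub for bus_generic_detach(); return nonzero to see the early out. */
static int
detach_children(void)
{
    printf("children detached\n");
    return (0);
}

static void
release_resources(void)
{
    printf("IRQs, clocks and regulators released\n");
}

static int
parent_detach(void)
{
    int error;

    /* Children first; if any child refuses, keep our resources. */
    error = detach_children();
    if (error != 0)
        return (error);
    release_resources();
    return (0);
}

int
main(void)
{
    return (parent_detach());
}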
a/sys/arm/nvidia/drm2/tegra_host1x.c b/sys/arm/nvidia/drm2/tegra_host1x.c index e04a50e4c003..ee43efba5ba7 100644 --- a/sys/arm/nvidia/drm2/tegra_host1x.c +++ b/sys/arm/nvidia/drm2/tegra_host1x.c @@ -1,639 +1,644 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #include "tegra_drm_if.h" #define WR4(_sc, _r, _v) bus_rite_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define SLEEP(_sc, timeout) sx_sleep(sc, &sc->lock, 0, "host1x", timeout); #define LOCK_INIT(_sc) sx_init(&_sc->lock, "host1x") #define LOCK_DESTROY(_sc) sx_destroy(&_sc->lock) #define ASSERT_LOCKED(_sc) sx_assert(&_sc->lock, SA_LOCKED) #define ASSERT_UNLOCKED(_sc) sx_assert(&_sc->lock, SA_UNLOCKED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-host1x", 1}, {NULL, 0} }; #define DRIVER_NAME "tegra" #define DRIVER_DESC "NVIDIA Tegra TK1" #define DRIVER_DATE "20151101" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 struct client_info; TAILQ_HEAD(client_list, client_info); typedef struct client_list client_list_t; struct client_info { TAILQ_ENTRY(client_info) list_e; device_t client; int activated; }; struct host1x_softc { struct simplebus_softc simplebus_sc; /* must be first */ device_t dev; struct sx lock; int attach_done; struct resource *mem_res; struct resource *syncpt_irq_res; void *syncpt_irq_h; struct resource *gen_irq_res; void *gen_irq_h; clk_t clk; hwreset_t reset; struct intr_config_hook irq_hook; int drm_inited; client_list_t clients; struct tegra_drm *tegra_drm; }; static void host1x_output_poll_changed(struct drm_device *drm_dev) { struct tegra_drm *drm; drm = container_of(drm_dev, struct tegra_drm, drm_dev); if (drm->fb != NULL) drm_fb_helper_hotplug_event(&drm->fb->fb_helper); } static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = tegra_drm_fb_create, .output_poll_changed = host1x_output_poll_changed, }; static int host1x_drm_init(struct host1x_softc *sc) { 
struct client_info *entry; int rv; LOCK(sc); TAILQ_FOREACH(entry, &sc->clients, list_e) { if (entry->activated) continue; rv = TEGRA_DRM_INIT_CLIENT(entry->client, sc->dev, sc->tegra_drm); if (rv != 0) { device_printf(sc->dev, "Cannot init DRM client %s: %d\n", device_get_name(entry->client), rv); return (rv); } entry->activated = 1; } UNLOCK(sc); return (0); } static int host1x_drm_exit(struct host1x_softc *sc) { struct client_info *entry; int rv; #ifdef FREEBSD_NOTYET struct drm_device *dev, *tmp; #endif LOCK(sc); if (!sc->drm_inited) { UNLOCK(sc); return (0); } TAILQ_FOREACH_REVERSE(entry, &sc->clients, client_list, list_e) { if (!entry->activated) continue; rv = TEGRA_DRM_EXIT_CLIENT(entry->client, sc->dev, sc->tegra_drm); if (rv != 0) { device_printf(sc->dev, "Cannot exit DRM client %s: %d\n", device_get_name(entry->client), rv); } entry->activated = 0; } #ifdef FREEBSD_NOTYET list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) drm_put_dev(dev); #endif sc->drm_inited = 0; UNLOCK(sc); return (0); } static int host1x_drm_load(struct drm_device *drm_dev, unsigned long flags) { struct host1x_softc *sc; int rv; sc = device_get_softc(drm_dev->dev); drm_mode_config_init(drm_dev); drm_dev->mode_config.min_width = 32; drm_dev->mode_config.min_height = 32; drm_dev->mode_config.max_width = 4096; drm_dev->mode_config.max_height = 4096; drm_dev->mode_config.funcs = &mode_config_funcs; rv = host1x_drm_init(sc); if (rv != 0) goto fail_host1x; drm_dev->irq_enabled = true; drm_dev->max_vblank_count = 0xffffffff; drm_dev->vblank_disable_allowed = true; rv = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc); if (rv != 0) goto fail_vblank; drm_mode_config_reset(drm_dev); rv = tegra_drm_fb_init(drm_dev); if (rv != 0) goto fail_fb; drm_kms_helper_poll_init(drm_dev); return (0); fail_fb: tegra_drm_fb_destroy(drm_dev); drm_vblank_cleanup(drm_dev); fail_vblank: host1x_drm_exit(sc); fail_host1x: drm_mode_config_cleanup(drm_dev); return (rv); } static int host1x_drm_unload(struct drm_device *drm_dev) { struct host1x_softc *sc; int rv; sc = device_get_softc(drm_dev->dev); drm_kms_helper_poll_fini(drm_dev); tegra_drm_fb_destroy(drm_dev); drm_mode_config_cleanup(drm_dev); rv = host1x_drm_exit(sc); if (rv < 0) return (rv); return (0); } static int host1x_drm_open(struct drm_device *drm_dev, struct drm_file *filp) { return (0); } static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) tegra_dc_cancel_page_flip(crtc, file); } static void host1x_drm_lastclose(struct drm_device *drm_dev) { struct tegra_drm *drm; drm = container_of(drm_dev, struct tegra_drm, drm_dev); if (drm->fb != NULL) drm_fb_helper_restore_fbdev_mode(&drm->fb->fb_helper); } static int host1x_drm_enable_vblank(struct drm_device *drm_dev, int pipe) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { if (pipe == tegra_dc_get_pipe(crtc)) { tegra_dc_enable_vblank(crtc); return (0); } } return (-ENODEV); } static void host1x_drm_disable_vblank(struct drm_device *drm_dev, int pipe) { struct drm_crtc *crtc; list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { if (pipe == tegra_dc_get_pipe(crtc)) { tegra_dc_disable_vblank(crtc); return; } } } static struct drm_ioctl_desc host1x_drm_ioctls[] = { }; struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = host1x_drm_load, .unload = host1x_drm_unload, .open = host1x_drm_open, 
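One thing worth noting in host1x_drm_init() above: the early return taken when TEGRA_DRM_INIT_CLIENT() fails appears to leave the sx lock held. A common shape that avoids the leak is a single unlock point; sketched standalone below with a pthread mutex standing in for the sx lock and init_one_client() as a stub:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for TEGRA_DRM_INIT_CLIENT(); flip to nonzero to test. */
static int
init_one_client(int i)
{
    printf("client %d initialized\n", i);
    return (0);
}

static int
drm_init(void)
{
    int i, rv = 0;

    pthread_mutex_lock(&lock);
    for (i = 0; i < 3; i++) {
        rv = init_one_client(i);
        if (rv != 0)
            break;          /* fall through to the unlock */
    }
    pthread_mutex_unlock(&lock);    /* single exit: lock never leaks */
    return (rv);
}

int
main(void)
{
    return (drm_init());
}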
.preclose = tegra_drm_preclose, .lastclose = host1x_drm_lastclose, .get_vblank_counter = drm_vblank_count, .enable_vblank = host1x_drm_enable_vblank, .disable_vblank = host1x_drm_disable_vblank, /* Fields filled by tegra_bo_driver_register() .gem_free_object .gem_pager_ops .dumb_create .dumb_map_offset .dumb_destroy */ .ioctls = host1x_drm_ioctls, .num_ioctls = nitems(host1x_drm_ioctls), .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; /* * ----------------- Device methods ------------------------- */ static void host1x_irq_hook(void *arg) { struct host1x_softc *sc; int rv; sc = arg; config_intrhook_disestablish(&sc->irq_hook); tegra_bo_driver_register(&tegra_drm_driver); rv = drm_get_platform_dev(sc->dev, &sc->tegra_drm->drm_dev, &tegra_drm_driver); if (rv != 0) { device_printf(sc->dev, "drm_get_platform_dev(): %d\n", rv); return; } sc->drm_inited = 1; } static struct fb_info * host1x_fb_helper_getinfo(device_t dev) { struct host1x_softc *sc; sc = device_get_softc(dev); if (sc->tegra_drm == NULL) return (NULL); return (tegra_drm_fb_getinfo(&sc->tegra_drm->drm_dev)); } static int host1x_register_client(device_t dev, device_t client) { struct host1x_softc *sc; struct client_info *entry; sc = device_get_softc(dev); entry = malloc(sizeof(struct client_info), M_DEVBUF, M_WAITOK | M_ZERO); entry->client = client; entry->activated = 0; LOCK(sc); TAILQ_INSERT_TAIL(&sc->clients, entry, list_e); UNLOCK(sc); return (0); } static int host1x_deregister_client(device_t dev, device_t client) { struct host1x_softc *sc; struct client_info *entry; sc = device_get_softc(dev); LOCK(sc); TAILQ_FOREACH(entry, &sc->clients, list_e) { if (entry->client == client) { if (entry->activated) panic("Tegra DRM: Attempt to deregister " "activated client"); TAILQ_REMOVE(&sc->clients, entry, list_e); free(entry, M_DEVBUF); UNLOCK(sc); return (0); } } UNLOCK(sc); return (0); } static void host1x_gen_intr(void *arg) { struct host1x_softc *sc; sc = (struct host1x_softc *)arg; LOCK(sc); UNLOCK(sc); } static void host1x_syncpt_intr(void *arg) { struct host1x_softc *sc; sc = (struct host1x_softc *)arg; LOCK(sc); UNLOCK(sc); } static void host1x_new_pass(device_t dev) { struct host1x_softc *sc; int rv, rid; phandle_t node; /* * We attach during BUS_PASS_BUS (because we must overcome simplebus), * but some of our FDT resources are not ready until BUS_PASS_DEFAULT */ sc = device_get_softc(dev); if (sc->attach_done || bus_get_pass() < BUS_PASS_DEFAULT) { bus_generic_new_pass(dev); return; } sc->attach_done = 1; node = ofw_bus_get_node(dev); /* Allocate our IRQ resource. 
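The client bookkeeping above is a plain sys/queue.h tail queue. A self-contained refresher on the operations it uses (TAILQ_INSERT_TAIL, TAILQ_FOREACH, TAILQ_REMOVE); this assumes a BSD-flavored <sys/queue.h> that also provides the _SAFE iterator:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct client {
    TAILQ_ENTRY(client) link;
    int id;
};
TAILQ_HEAD(client_list, client);

int
main(void)
{
    struct client_list head = TAILQ_HEAD_INITIALIZER(head);
    struct client *c, *tmp;
    int i;

    for (i = 0; i < 3; i++) {
        c = calloc(1, sizeof(*c));
        c->id = i;
        TAILQ_INSERT_TAIL(&head, c, link);
    }
    TAILQ_FOREACH(c, &head, link)
        printf("client %d\n", c->id);
    /* The _SAFE variant allows removal while iterating. */
    TAILQ_FOREACH_SAFE(c, &head, link, tmp) {
        TAILQ_REMOVE(&head, c, link);
        free(c);
    }
    return (0);
}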
*/ rid = 0; sc->syncpt_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->syncpt_irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } rid = 1; sc->gen_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->gen_irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* FDT resources */ rv = hwreset_get_by_ofw_name(sc->dev, 0, "host1x", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get fuse reset\n"); goto fail; } rv = clk_get_by_ofw_index(sc->dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); goto fail; } /* Setup interrupts */ rv = bus_setup_intr(dev, sc->gen_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, host1x_gen_intr, sc, &sc->gen_irq_h); if (rv) { device_printf(dev, "Cannot setup gen interrupt.\n"); goto fail; } rv = bus_setup_intr(dev, sc->syncpt_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, host1x_syncpt_intr, sc, &sc->syncpt_irq_h); if (rv) { device_printf(dev, "Cannot setup syncpt interrupt.\n"); goto fail; } simplebus_init(dev, 0); for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); sc->irq_hook.ich_func = host1x_irq_hook; sc->irq_hook.ich_arg = sc; config_intrhook_establish(&sc->irq_hook); bus_generic_new_pass(dev); return; fail: device_detach(dev); return; } static int host1x_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int host1x_attach(device_t dev) { int rv, rid; struct host1x_softc *sc; sc = device_get_softc(dev); sc->tegra_drm = malloc(sizeof(struct tegra_drm), DRM_MEM_DRIVER, M_WAITOK | M_ZERO); /* crosslink together all worlds */ sc->dev = dev; sc->tegra_drm->drm_dev.dev_private = &sc->tegra_drm; sc->tegra_drm->drm_dev.dev = dev; TAILQ_INIT(&sc->clients); LOCK_INIT(sc); /* Get the memory resource for the register mapping. 
*/ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } return (bus_generic_attach(dev)); fail: if (sc->tegra_drm != NULL) free(sc->tegra_drm, DRM_MEM_DRIVER); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int host1x_detach(device_t dev) { struct host1x_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); host1x_drm_exit(sc); if (sc->gen_irq_h != NULL) bus_teardown_intr(dev, sc->gen_irq_res, sc->gen_irq_h); if (sc->tegra_drm != NULL) free(sc->tegra_drm, DRM_MEM_DRIVER); if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->syncpt_irq_h != NULL) bus_teardown_intr(dev, sc->syncpt_irq_res, sc->syncpt_irq_h); if (sc->gen_irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 1, sc->gen_irq_res); if (sc->syncpt_irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->syncpt_irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static device_method_t host1x_methods[] = { /* Device interface */ DEVMETHOD(device_probe, host1x_probe), DEVMETHOD(device_attach, host1x_attach), DEVMETHOD(device_detach, host1x_detach), /* Bus interface */ DEVMETHOD(bus_new_pass, host1x_new_pass), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, host1x_fb_helper_getinfo), /* tegra drm interface */ DEVMETHOD(tegra_drm_register_client, host1x_register_client), DEVMETHOD(tegra_drm_deregister_client, host1x_deregister_client), DEVMETHOD_END }; DEFINE_CLASS_1(host1x, host1x_driver, host1x_methods, sizeof(struct host1x_softc), simplebus_driver); EARLY_DRIVER_MODULE(host1x, simplebus, host1x_driver, 0, 0, BUS_PASS_BUS); /* Bindings for fbd device. */ extern driver_t fbd_driver; DRIVER_MODULE(fbd, host1x, fbd_driver, 0, 0); diff --git a/sys/arm/nvidia/tegra_abpmisc.c b/sys/arm/nvidia/tegra_abpmisc.c index 88f9ecde3f31..1f54a918f63b 100644 --- a/sys/arm/nvidia/tegra_abpmisc.c +++ b/sys/arm/nvidia/tegra_abpmisc.c @@ -1,190 +1,195 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * SoC misc configuration and indentification driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMC_STRAPPING_OPT_A 0 /* 0x464 */ #define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4 #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \ (0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT) #define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \ (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT) #define ABP_RD4(_sc, _r) bus_read_4((_sc)->abp_misc_res, (_r)) #define STR_RD4(_sc, _r) bus_read_4((_sc)->strap_opt_res, (_r)) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-apbmisc", 1}, {"nvidia,tegra210-apbmisc", 1}, {NULL, 0} }; struct tegra_abpmisc_softc { device_t dev; struct resource *abp_misc_res; struct resource *strap_opt_res; }; static struct tegra_abpmisc_softc *dev_sc; static void tegra_abpmisc_read_revision(struct tegra_abpmisc_softc *sc) { uint32_t id, chip_id, minor_rev; int rev; id = ABP_RD4(sc, 4); chip_id = (id >> 8) & 0xff; minor_rev = (id >> 16) & 0xf; switch (minor_rev) { case 1: rev = TEGRA_REVISION_A01; break; case 2: rev = TEGRA_REVISION_A02; break; case 3: rev = TEGRA_REVISION_A03; break; case 4: rev = TEGRA_REVISION_A04; break; default: rev = TEGRA_REVISION_UNKNOWN; } tegra_sku_info.chip_id = chip_id; tegra_sku_info.revision = rev; } static int tegra_abpmisc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_abpmisc_attach(device_t dev) { int rid; struct tegra_abpmisc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->abp_misc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->abp_misc_res == NULL) { device_printf(dev, "Cannot map ABP misc registers.\n"); goto fail; } rid = 1; sc->strap_opt_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->strap_opt_res == NULL) { device_printf(dev, "Cannot map strapping options registers.\n"); goto fail; } tegra_abpmisc_read_revision(sc); /* XXX - Hack - address collision with pinmux. 
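The word read at offset 4 above (the HIDREV-style ID register) packs the chip ID into bits 15:8 and the minor revision into bits 19:16; minor revisions 1..4 map onto TEGRA_REVISION_A01..A04. A quick standalone decode; the raw value below is constructed for illustration (0x40 being the Tegra124 chip ID):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    /* Illustrative raw value: chip 0x40 (Tegra124), minor rev 1. */
    uint32_t id = (1U << 16) | (0x40U << 8);
    uint32_t chip_id = (id >> 8) & 0xffU;
    uint32_t minor_rev = (id >> 16) & 0xfU;

    printf("chip 0x%02x, minor rev %u => A0%u\n",
        (unsigned)chip_id, (unsigned)minor_rev, (unsigned)minor_rev);
    return (0);
}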
*/ if (sc->abp_misc_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->abp_misc_res); sc->abp_misc_res = NULL; } dev_sc = sc; return (bus_generic_attach(dev)); fail: if (sc->abp_misc_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->abp_misc_res); if (sc->strap_opt_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 1, sc->strap_opt_res); return (ENXIO); } static int tegra_abpmisc_detach(device_t dev) { struct tegra_abpmisc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->abp_misc_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->abp_misc_res); if (sc->strap_opt_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 1, sc->strap_opt_res); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_abpmisc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_abpmisc_probe), DEVMETHOD(device_attach, tegra_abpmisc_attach), DEVMETHOD(device_detach, tegra_abpmisc_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(abpmisc, tegra_abpmisc_driver, tegra_abpmisc_methods, sizeof(struct tegra_abpmisc_softc)); EARLY_DRIVER_MODULE(tegra_abpmisc, simplebus, tegra_abpmisc_driver, NULL, NULL, BUS_PASS_TIMER); diff --git a/sys/arm/nvidia/tegra_efuse.c b/sys/arm/nvidia/tegra_efuse.c index 35d9380a18a5..dc637e6d0bec 100644 --- a/sys/arm/nvidia/tegra_efuse.c +++ b/sys/arm/nvidia/tegra_efuse.c @@ -1,527 +1,532 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define FUSES_START 0x100 #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (FUSES_START + (_r))) struct efuse_soc; struct tegra_efuse_softc { device_t dev; struct resource *mem_res; struct efuse_soc *soc; clk_t clk; hwreset_t reset; }; struct tegra_efuse_softc *dev_sc; struct tegra_sku_info tegra_sku_info; static char *tegra_rev_name[] = { [TEGRA_REVISION_UNKNOWN] = "unknown", [TEGRA_REVISION_A01] = "A01", [TEGRA_REVISION_A02] = "A02", [TEGRA_REVISION_A03] = "A03", [TEGRA_REVISION_A03p] = "A03 prime", [TEGRA_REVISION_A04] = "A04", }; struct efuse_soc { void (*init)(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); }; static void tegra124_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); struct efuse_soc tegra124_efuse_soc = { .init = tegra124_init, }; static void tegra210_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku); struct efuse_soc tegra210_efuse_soc = { .init = tegra210_init, }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-efuse", (intptr_t)&tegra124_efuse_soc}, {"nvidia,tegra210-efuse", (intptr_t)&tegra210_efuse_soc}, {NULL, 0} }; /* ---------------------- Tegra 124 specific code & data --------------- */ #define TEGRA124_CPU_PROCESS_CORNERS 2 #define TEGRA124_GPU_PROCESS_CORNERS 2 #define TEGRA124_SOC_PROCESS_CORNERS 2 #define TEGRA124_FUSE_SKU_INFO 0x10 #define TEGRA124_FUSE_CPU_SPEEDO_0 0x14 #define TEGRA124_FUSE_CPU_IDDQ 0x18 #define TEGRA124_FUSE_FT_REV 0x28 #define TEGRA124_FUSE_CPU_SPEEDO_1 0x2c #define TEGRA124_FUSE_CPU_SPEEDO_2 0x30 #define TEGRA124_FUSE_SOC_SPEEDO_0 0x34 #define TEGRA124_FUSE_SOC_SPEEDO_1 0x38 #define TEGRA124_FUSE_SOC_SPEEDO_2 0x3c #define TEGRA124_FUSE_SOC_IDDQ 0x40 #define TEGRA124_FUSE_GPU_IDDQ 0x128 enum { TEGRA124_THRESHOLD_INDEX_0, TEGRA124_THRESHOLD_INDEX_1, TEGRA124_THRESHOLD_INDEX_COUNT, }; static uint32_t tegra124_cpu_process_speedos[][TEGRA124_CPU_PROCESS_CORNERS] = { {2190, UINT_MAX}, {0, UINT_MAX}, }; static uint32_t tegra124_gpu_process_speedos[][TEGRA124_GPU_PROCESS_CORNERS] = { {1965, UINT_MAX}, {0, UINT_MAX}, }; static uint32_t tegra124_soc_process_speedos[][TEGRA124_SOC_PROCESS_CORNERS] = { {2101, UINT_MAX}, {0, UINT_MAX}, }; static void tegra124_rev_sku_to_speedo_ids(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku, int *threshold) { /* Set default */ sku->cpu_speedo_id = 0; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 0; *threshold = TEGRA124_THRESHOLD_INDEX_0; switch (sku->sku_id) { case 0x00: /* Eng sku */ case 0x0F: case 0x23: /* Using the default */ break; case 0x83: sku->cpu_speedo_id = 2; break; case 0x1F: case 0x87: case 0x27: sku->cpu_speedo_id = 2; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 1; *threshold = TEGRA124_THRESHOLD_INDEX_0; break; case 0x81: case 0x21: case 0x07: sku->cpu_speedo_id = 1; sku->soc_speedo_id = 1; sku->gpu_speedo_id = 1; *threshold = TEGRA124_THRESHOLD_INDEX_1; break; case 0x49: case 0x4A: case 0x48: sku->cpu_speedo_id = 4; sku->soc_speedo_id = 2; sku->gpu_speedo_id = 3; *threshold = TEGRA124_THRESHOLD_INDEX_1; break; default: device_printf(sc->dev, " Unknown SKU ID %d\n", sku->sku_id); break; } } static void tegra124_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku) { int i, threshold; sku->sku_id = RD4(sc, TEGRA124_FUSE_SKU_INFO); sku->soc_iddq_value = RD4(sc, TEGRA124_FUSE_SOC_IDDQ); sku->cpu_iddq_value = RD4(sc, TEGRA124_FUSE_CPU_IDDQ); sku->gpu_iddq_value = 
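The speedo tables above drive a simple classification used a little further below: the process ID is the index of the first corner that the fused speedo value falls under. A standalone sketch of that lookup, reusing the two rows of the Tegra124 CPU table:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define CORNERS 2

/* Rows copied from tegra124_cpu_process_speedos above. */
static const uint32_t corners[][CORNERS] = {
    {2190, UINT_MAX},
    {0, UINT_MAX},
};

/* Process ID = index of the first corner the speedo value is below. */
static int
process_id(int threshold, uint32_t speedo)
{
    int i;

    for (i = 0; i < CORNERS; i++)
        if (speedo < corners[threshold][i])
            break;
    return (i);
}

int
main(void)
{
    printf("speedo 2000 -> id %d\n", process_id(0, 2000));  /* 0 */
    printf("speedo 2300 -> id %d\n", process_id(0, 2300));  /* 1 */
    return (0);
}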
RD4(sc, TEGRA124_FUSE_GPU_IDDQ); sku->soc_speedo_value = RD4(sc, TEGRA124_FUSE_SOC_SPEEDO_0); sku->cpu_speedo_value = RD4(sc, TEGRA124_FUSE_CPU_SPEEDO_0); sku->gpu_speedo_value = RD4(sc, TEGRA124_FUSE_CPU_SPEEDO_2); if (sku->cpu_speedo_value == 0) { device_printf(sc->dev, "CPU Speedo value is not fused.\n"); return; } tegra124_rev_sku_to_speedo_ids(sc, sku, &threshold); for (i = 0; i < TEGRA124_SOC_PROCESS_CORNERS; i++) { if (sku->soc_speedo_value < tegra124_soc_process_speedos[threshold][i]) break; } sku->soc_process_id = i; for (i = 0; i < TEGRA124_CPU_PROCESS_CORNERS; i++) { if (sku->cpu_speedo_value < tegra124_cpu_process_speedos[threshold][i]) break; } sku->cpu_process_id = i; for (i = 0; i < TEGRA124_GPU_PROCESS_CORNERS; i++) { if (sku->gpu_speedo_value < tegra124_gpu_process_speedos[threshold][i]) break; } sku->gpu_process_id = i; } /* ----------------- End of Tegra 124 specific code & data --------------- */ /* -------------------- Tegra 201 specific code & data ------------------- */ #define TEGRA210_CPU_PROCESS_CORNERS 2 #define TEGRA210_GPU_PROCESS_CORNERS 2 #define TEGRA210_SOC_PROCESS_CORNERS 3 #define TEGRA210_FUSE_SKU_INFO 0x010 #define TEGRA210_FUSE_CPU_SPEEDO_0 0x014 #define TEGRA210_FUSE_CPU_IDDQ 0x018 #define TEGRA210_FUSE_FT_REV 0x028 #define TEGRA210_FUSE_CPU_SPEEDO_1 0x02c #define TEGRA210_FUSE_CPU_SPEEDO_2 0x030 #define TEGRA210_FUSE_SOC_SPEEDO_0 0x034 #define TEGRA210_FUSE_SOC_SPEEDO_1 0x038 #define TEGRA210_FUSE_SOC_SPEEDO_2 0x03c #define TEGRA210_FUSE_SOC_IDDQ 0x040 #define TEGRA210_FUSE_GPU_IDDQ 0x128 #define TEGRA210_FUSE_SPARE 0x270 enum { TEGRA210_THRESHOLD_INDEX_0, TEGRA210_THRESHOLD_INDEX_1, TEGRA210_THRESHOLD_INDEX_COUNT, }; static uint32_t tegra210_cpu_process_speedos[][TEGRA210_CPU_PROCESS_CORNERS] = { {2119, UINT_MAX}, {2119, UINT_MAX}, }; static uint32_t tegra210_gpu_process_speedos[][TEGRA210_GPU_PROCESS_CORNERS] = { {UINT_MAX, UINT_MAX}, {UINT_MAX, UINT_MAX}, }; static uint32_t tegra210_soc_process_speedos[][TEGRA210_SOC_PROCESS_CORNERS] = { {1950, 2100, UINT_MAX}, {1950, 2100, UINT_MAX}, }; static uint32_t tegra210_get_speedo_revision(struct tegra_efuse_softc *sc) { uint32_t reg; uint32_t val; val = 0; /* Revision i encoded in spare fields */ reg = RD4(sc, TEGRA210_FUSE_SPARE + 2 * 4); val |= (reg & 1) << 0; reg = RD4(sc, TEGRA210_FUSE_SPARE + 3 * 4); val |= (reg & 1) << 1; reg = RD4(sc, TEGRA210_FUSE_SPARE + 4 * 4); val |= (reg & 1) << 2; return (val); } static void tegra210_rev_sku_to_speedo_ids(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku, int speedo_rev, int *threshold) { /* Set defaults */ sku->cpu_speedo_id = 0; sku->soc_speedo_id = 0; sku->gpu_speedo_id = 0; *threshold = TEGRA210_THRESHOLD_INDEX_0; switch (sku->sku_id) { case 0x00: /* Eng sku */ case 0x01: /* Eng sku */ case 0x07: case 0x17: case 0x27: /* Use defaults */ if (speedo_rev >= 2) sku->gpu_speedo_id = 1; break; case 0x13: if (speedo_rev >= 2) sku->gpu_speedo_id = 1; sku->cpu_speedo_id = 1; break; default: device_printf(sc->dev, " Unknown SKU ID %d\n", sku->sku_id); break; } } static void tegra210_init(struct tegra_efuse_softc *sc, struct tegra_sku_info *sku) { int i, threshold, speedo_rev; uint32_t cpu_speedo[3], soc_speedo[3]; cpu_speedo[0] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_0); cpu_speedo[1] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_1); cpu_speedo[2] = RD4(sc, TEGRA210_FUSE_CPU_SPEEDO_2); soc_speedo[0] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_0); soc_speedo[1] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_1); soc_speedo[2] = RD4(sc, TEGRA210_FUSE_SOC_SPEEDO_2); sku->cpu_iddq_value = RD4(sc, 
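tegra210_get_speedo_revision() above assembles a three-bit revision from bit 0 of spare-fuse words 2, 3 and 4. The same assembly isolated; the spare[] contents here are fake:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    /* Fake spare-fuse words 0..4; only bit 0 of words 2..4 matters. */
    uint32_t spare[5] = { 0, 0, 1, 1, 0 };
    uint32_t rev = 0;

    rev |= (spare[2] & 1) << 0;
    rev |= (spare[3] & 1) << 1;
    rev |= (spare[4] & 1) << 2;
    printf("speedo revision = %u\n", (unsigned)rev);    /* 1 | 2 = 3 */
    return (0);
}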
TEGRA210_FUSE_CPU_IDDQ); sku->soc_iddq_value = RD4(sc, TEGRA210_FUSE_SOC_IDDQ); sku->gpu_iddq_value = RD4(sc, TEGRA210_FUSE_GPU_IDDQ); speedo_rev = tegra210_get_speedo_revision(sc); device_printf(sc->dev, " Speedo revision: %u\n", speedo_rev); if (speedo_rev >= 3) { sku->cpu_speedo_value = cpu_speedo[0]; sku->gpu_speedo_value = cpu_speedo[2]; sku->soc_speedo_value = soc_speedo[0]; } else if (speedo_rev == 2) { sku->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10; sku->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10; sku->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10; } else { sku->cpu_speedo_value = 2100; sku->gpu_speedo_value = cpu_speedo[2] - 75; sku->soc_speedo_value = 1900; } tegra210_rev_sku_to_speedo_ids(sc, sku, speedo_rev, &threshold); for (i = 0; i < TEGRA210_SOC_PROCESS_CORNERS; i++) { if (sku->soc_speedo_value < tegra210_soc_process_speedos[threshold][i]) break; } sku->soc_process_id = i; for (i = 0; i < TEGRA210_CPU_PROCESS_CORNERS; i++) { if (sku->cpu_speedo_value < tegra210_cpu_process_speedos[threshold][i]) break; } sku->cpu_process_id = i; for (i = 0; i < TEGRA210_GPU_PROCESS_CORNERS; i++) { if (sku->gpu_speedo_value < tegra210_gpu_process_speedos[threshold][i]) break; } sku->gpu_process_id = i; } /* ----------------- End of Tegra 210 specific code & data --------------- */ uint32_t tegra_fuse_read_4(int addr) { if (dev_sc == NULL) panic("tegra_fuse_read_4 called too early"); return (RD4(dev_sc, addr)); } static void tegra_efuse_dump_sku(void) { printf(" TEGRA SKU Info:\n"); printf(" chip_id: %u\n", tegra_sku_info.chip_id); printf(" sku_id: %u\n", tegra_sku_info.sku_id); printf(" cpu_process_id: %u\n", tegra_sku_info.cpu_process_id); printf(" cpu_speedo_id: %u\n", tegra_sku_info.cpu_speedo_id); printf(" cpu_speedo_value: %u\n", tegra_sku_info.cpu_speedo_value); printf(" cpu_iddq_value: %u\n", tegra_sku_info.cpu_iddq_value); printf(" soc_process_id: %u\n", tegra_sku_info.soc_process_id); printf(" soc_speedo_id: %u\n", tegra_sku_info.soc_speedo_id); printf(" soc_speedo_value: %u\n", tegra_sku_info.soc_speedo_value); printf(" soc_iddq_value: %u\n", tegra_sku_info.soc_iddq_value); printf(" gpu_process_id: %u\n", tegra_sku_info.gpu_process_id); printf(" gpu_speedo_id: %u\n", tegra_sku_info.gpu_speedo_id); printf(" gpu_speedo_value: %u\n", tegra_sku_info.gpu_speedo_value); printf(" gpu_iddq_value: %u\n", tegra_sku_info.gpu_iddq_value); printf(" revision: %s\n", tegra_rev_name[tegra_sku_info.revision]); } static int tegra_efuse_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_efuse_attach(device_t dev) { int rv, rid; struct tegra_efuse_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->soc = (struct efuse_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* OFW resources. 
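The speedo_rev == 2 branch above rescales the raw fuse readings through fixed-point linear maps of the form (offset + slope * raw / 100) / 10. A worked example with a hypothetical raw CPU speedo of 2000:

#include <stdio.h>

int
main(void)
{
    int raw = 2000;     /* hypothetical fused CPU speedo */
    int scaled = (-1938 + (1095 * raw / 100)) / 10;

    /* 1095 * 2000 / 100 = 21900; (21900 - 1938) / 10 = 1996 */
    printf("raw %d -> scaled %d\n", raw, scaled);
    return (0);
}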
*/ rv = clk_get_by_ofw_name(dev, 0, "fuse", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get fuse clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "fuse", &sc->reset); if (rv != 0) { device_printf(dev, "Cannot get fuse reset\n"); goto fail; } rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); goto fail; } sc->soc->init(sc, &tegra_sku_info); dev_sc = sc; if (bootverbose) tegra_efuse_dump_sku(); return (bus_generic_attach(dev)); fail: dev_sc = NULL; if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (rv); } static int tegra_efuse_detach(device_t dev) { struct tegra_efuse_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); dev_sc = NULL; if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_efuse_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_efuse_probe), DEVMETHOD(device_attach, tegra_efuse_attach), DEVMETHOD(device_detach, tegra_efuse_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(efuse, tegra_efuse_driver, tegra_efuse_methods, sizeof(struct tegra_efuse_softc)); EARLY_DRIVER_MODULE(tegra_efuse, simplebus, tegra_efuse_driver, NULL, NULL, BUS_PASS_TIMER); diff --git a/sys/arm/nvidia/tegra_i2c.c b/sys/arm/nvidia/tegra_i2c.c index 0fe1aa7ba48e..b01f1a1fdce4 100644 --- a/sys/arm/nvidia/tegra_i2c.c +++ b/sys/arm/nvidia/tegra_i2c.c @@ -1,797 +1,802 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * I2C driver for Tegra SoCs. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define I2C_CNFG 0x000 #define I2C_CNFG_MSTR_CLR_BUS_ON_TIMEOUT (1 << 15) #define I2C_CNFG_DEBOUNCE_CNT(x) (((x) & 0x07) << 12) #define I2C_CNFG_NEW_MASTER_FSM (1 << 11) #define I2C_CNFG_PACKET_MODE_EN (1 << 10) #define I2C_CNFG_SEND (1 << 9) #define I2C_CNFG_NOACK (1 << 8) #define I2C_CNFG_CMD2 (1 << 7) #define I2C_CNFG_CMD1 (1 << 6) #define I2C_CNFG_START (1 << 5) #define I2C_CNFG_SLV2 (1 << 4) #define I2C_CNFG_LENGTH_SHIFT 1 #define I2C_CNFG_LENGTH_MASK 0x7 #define I2C_CNFG_A_MOD (1 << 0) #define I2C_CMD_ADDR0 0x004 #define I2C_CMD_ADDR1 0x008 #define I2C_CMD_DATA1 0x00c #define I2C_CMD_DATA2 0x010 #define I2C_STATUS 0x01c #define I2C_SL_CNFG 0x020 #define I2C_SL_RCVD 0x024 #define I2C_SL_STATUS 0x028 #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 #define I2C_TLOW_SEXT 0x034 #define I2C_SL_DELAY_COUNT 0x03c #define I2C_SL_INT_MASK 0x040 #define I2C_SL_INT_SOURCE 0x044 #define I2C_SL_INT_SET 0x048 #define I2C_TX_PACKET_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 #define I2C_FIFO_CONTROL 0x05c #define I2C_FIFO_CONTROL_SLV_TX_FIFO_TRIG(x) (((x) & 0x07) << 13) #define I2C_FIFO_CONTROL_SLV_RX_FIFO_TRIG(x) (((x) & 0x07) << 10) #define I2C_FIFO_CONTROL_SLV_TX_FIFO_FLUSH (1 << 9) #define I2C_FIFO_CONTROL_SLV_RX_FIFO_FLUSH (1 << 8) #define I2C_FIFO_CONTROL_TX_FIFO_TRIG(x) (((x) & 0x07) << 5) #define I2C_FIFO_CONTROL_RX_FIFO_TRIG(x) (((x) & 0x07) << 2) #define I2C_FIFO_CONTROL_TX_FIFO_FLUSH (1 << 1) #define I2C_FIFO_CONTROL_RX_FIFO_FLUSH (1 << 0) #define I2C_FIFO_STATUS 0x060 #define I2C_FIFO_STATUS_SLV_XFER_ERR_REASON (1 << 25) #define I2C_FIFO_STATUS_TX_FIFO_SLV_EMPTY_CNT(x) (((x) >> 20) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_SLV_FULL_CNT(x) (((x) >> 16) & 0xF) #define I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(x) (((x) >> 4) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(x) (((x) >> 0) & 0xF) #define I2C_INTERRUPT_MASK_REGISTER 0x064 #define I2C_INTERRUPT_STATUS_REGISTER 0x068 #define I2C_INT_SLV_ACK_WITHHELD (1 << 28) #define I2C_INT_SLV_RD2WR (1 << 27) #define I2C_INT_SLV_WR2RD (1 << 26) #define I2C_INT_SLV_PKT_XFER_ERR (1 << 25) #define I2C_INT_SLV_TX_BUFFER_REQ (1 << 24) #define I2C_INT_SLV_RX_BUFFER_FILLED (1 << 23) #define I2C_INT_SLV_PACKET_XFER_COMPLETE (1 << 22) #define I2C_INT_SLV_TFIFO_OVF (1 << 21) #define I2C_INT_SLV_RFIFO_UNF (1 << 20) #define I2C_INT_SLV_TFIFO_DATA_REQ (1 << 17) #define I2C_INT_SLV_RFIFO_DATA_REQ (1 << 16) #define I2C_INT_BUS_CLEAR_DONE (1 << 11) #define I2C_INT_TLOW_MEXT_TIMEOUT (1 << 10) #define I2C_INT_TLOW_SEXT_TIMEOUT (1 << 9) #define I2C_INT_TIMEOUT (1 << 8) #define I2C_INT_PACKET_XFER_COMPLETE (1 << 7) #define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1 << 6) #define I2C_INT_TFIFO_OVR (1 << 5) #define I2C_INT_RFIFO_UNF (1 << 4) #define I2C_INT_NOACK (1 << 3) #define I2C_INT_ARB_LOST (1 << 2) #define I2C_INT_TFIFO_DATA_REQ (1 << 1) #define I2C_INT_RFIFO_DATA_REQ (1 << 0) #define I2C_ERROR_MASK (I2C_INT_ARB_LOST | I2C_INT_NOACK | \ I2C_INT_RFIFO_UNF | I2C_INT_TFIFO_OVR) #define I2C_CLK_DIVISOR 0x06c #define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16 #define I2C_CLK_DIVISOR_STD_FAST_MODE_MASK 0xffff #define I2C_CLK_DIVISOR_HSMODE_SHIFT 0 #define I2C_CLK_DIVISOR_HSMODE_MASK 0xffff #define I2C_INTERRUPT_SOURCE_REGISTER 0x070 #define I2C_INTERRUPT_SET_REGISTER 0x074 #define I2C_SLV_TX_PACKET_FIFO 0x07c #define I2C_SLV_PACKET_STATUS 0x080 #define 
I2C_BUS_CLEAR_CONFIG 0x084 #define I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(x) (((x) & 0xFF) << 16) #define I2C_BUS_CLEAR_CONFIG_BC_STOP_COND (1 << 2) #define I2C_BUS_CLEAR_CONFIG_BC_TERMINATE (1 << 1) #define I2C_BUS_CLEAR_CONFIG_BC_ENABLE (1 << 0) #define I2C_BUS_CLEAR_STATUS 0x088 #define I2C_BUS_CLEAR_STATUS_BC_STATUS (1 << 0) #define I2C_CONFIG_LOAD 0x08c #define I2C_CONFIG_LOAD_TIMEOUT_CONFIG_LOAD (1 << 2) #define I2C_CONFIG_LOAD_SLV_CONFIG_LOAD (1 << 1) #define I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD (1 << 0) #define I2C_INTERFACE_TIMING_0 0x094 #define I2C_INTERFACE_TIMING_1 0x098 #define I2C_HS_INTERFACE_TIMING_0 0x09c #define I2C_HS_INTERFACE_TIMING_1 0x0a0 /* Protocol header 0 */ #define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 #define PACKET_HEADER0_HEADER_SIZE_MASK 0x3 #define PACKET_HEADER0_PACKET_ID_SHIFT 16 #define PACKET_HEADER0_PACKET_ID_MASK 0xff #define PACKET_HEADER0_CONT_ID_SHIFT 12 #define PACKET_HEADER0_CONT_ID_MASK 0xf #define PACKET_HEADER0_PROTOCOL_I2C (1 << 4) #define PACKET_HEADER0_TYPE_SHIFT 0 #define PACKET_HEADER0_TYPE_MASK 0x7 /* I2C header */ #define I2C_HEADER_HIGHSPEED_MODE (1 << 22) #define I2C_HEADER_CONT_ON_NAK (1 << 21) #define I2C_HEADER_SEND_START_BYTE (1 << 20) #define I2C_HEADER_READ (1 << 19) #define I2C_HEADER_10BIT_ADDR (1 << 18) #define I2C_HEADER_IE_ENABLE (1 << 17) #define I2C_HEADER_REPEAT_START (1 << 16) #define I2C_HEADER_CONTINUE_XFER (1 << 15) #define I2C_HEADER_MASTER_ADDR_SHIFT 12 #define I2C_HEADER_MASTER_ADDR_MASK 0x7 #define I2C_HEADER_SLAVE_ADDR_SHIFT 0 #define I2C_HEADER_SLAVE_ADDR_MASK 0x3ff #define I2C_CLK_DIVISOR_STD_FAST_MODE 0x19 #define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8 #define I2C_REQUEST_TIMEOUT (5 * hz) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_i2c", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-i2c", 1}, {"nvidia,tegra210-i2c", 1}, {NULL, 0} }; enum tegra_i2c_xfer_type { XFER_STOP, /* Send stop condition after xfer */ XFER_REPEAT_START, /* Send repeated start after xfer */ XFER_CONTINUE /* Don't send nothing */ } ; struct tegra_i2c_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; device_t iicbus; clk_t clk; hwreset_t reset; uint32_t core_freq; uint32_t bus_freq; int bus_inuse; struct iic_msg *msg; int msg_idx; uint32_t bus_err; int done; }; static int tegra_i2c_flush_fifo(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg; reg = RD4(sc, I2C_FIFO_CONTROL); reg |= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; WR4(sc, I2C_FIFO_CONTROL, reg); timeout = 10; while (timeout > 0) { reg = RD4(sc, I2C_FIFO_CONTROL); reg &= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; if (reg == 0) break; DELAY(10); } if (timeout <= 0) { device_printf(sc->dev, "FIFO flush timedout\n"); return (ETIMEDOUT); } return (0); } static void tegra_i2c_setup_clk(struct tegra_i2c_softc *sc, int clk_freq) { int div; div = ((sc->core_freq / clk_freq) / 10) - 1; if ((sc->core_freq / (10 * (div + 1))) > clk_freq) div++; if (div > 65535) div = 65535; WR4(sc, 
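tegra_i2c_setup_clk(), just above, derives the STD/FAST-mode divisor from bus = core / (10 * (div + 1)), bumps div once if truncation left the bus rate too high, and clamps it to the 16-bit register field. A standalone check using the defaults set in attach (136 MHz core clock, 100 kHz bus):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint32_t core = 136000000;  /* I2C block clock from attach */
    uint32_t bus = 100000;      /* requested SCL rate */
    uint32_t div;

    div = ((core / bus) / 10) - 1;
    if (core / (10 * (div + 1)) > bus)  /* round toward slower */
        div++;
    if (div > 65535)                    /* 16-bit divisor field */
        div = 65535;
    printf("div=%u -> actual %u Hz\n",
        (unsigned)div, (unsigned)(core / (10 * (div + 1))));
    return (0);
}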
I2C_CLK_DIVISOR, (1 << I2C_CLK_DIVISOR_HSMODE_SHIFT) | (div << I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT)); } static void tegra_i2c_bus_clear(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg, status; WR4(sc, I2C_BUS_CLEAR_CONFIG, I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(18) | I2C_BUS_CLEAR_CONFIG_BC_STOP_COND | I2C_BUS_CLEAR_CONFIG_BC_TERMINATE); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); reg = RD4(sc, I2C_BUS_CLEAR_CONFIG); reg |= I2C_BUS_CLEAR_CONFIG_BC_ENABLE; WR4(sc, I2C_BUS_CLEAR_CONFIG,reg); for (timeout = 1000; timeout > 0; timeout--) { if ((RD4(sc, I2C_BUS_CLEAR_CONFIG) & I2C_BUS_CLEAR_CONFIG_BC_ENABLE) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "bus clear timeouted\n"); status = RD4(sc, I2C_BUS_CLEAR_STATUS); if ((status & I2C_BUS_CLEAR_STATUS_BC_STATUS) == 0) device_printf(sc->dev, "bus clear failed\n"); } static int tegra_i2c_hw_init(struct tegra_i2c_softc *sc) { int rv, timeout; /* Reset the core. */ rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); return (rv); } DELAY(10); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); return (rv); } WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); WR4(sc, I2C_CNFG, I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | I2C_CNFG_DEBOUNCE_CNT(2)); tegra_i2c_setup_clk(sc, sc->bus_freq); WR4(sc, I2C_FIFO_CONTROL, I2C_FIFO_CONTROL_TX_FIFO_TRIG(7) | I2C_FIFO_CONTROL_RX_FIFO_TRIG(0)); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); tegra_i2c_bus_clear(sc); return (0); } static int tegra_i2c_tx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_tx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = 0; for (i = 0; i < cnt; i++) { reg |= sc->msg->buf[sc->msg_idx] << (i * 8); sc->msg_idx++; } WR4(sc, I2C_TX_PACKET_FIFO, reg); } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static int tegra_i2c_rx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_rx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = RD4(sc, I2C_RX_FIFO); for (i = 0; i < cnt; i++) { sc->msg->buf[sc->msg_idx] = (reg >> (i * 8)) & 0xFF; sc->msg_idx++; } } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static void tegra_i2c_intr(void *arg) { struct tegra_i2c_softc *sc; uint32_t status, reg; int rv; sc = (struct tegra_i2c_softc *)arg; LOCK(sc); status = RD4(sc, I2C_INTERRUPT_SOURCE_REGISTER); if (sc->msg == NULL) { /* Unexpected interrupt - disable FIFOs, clear reset. 
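tegra_i2c_tx() and tegra_i2c_rx() above move up to four message bytes per 32-bit FIFO word, least-significant byte first. A self-contained round trip of that packing over a six-byte buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
    uint8_t msg[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
    uint8_t out[6];
    uint32_t word;
    int idx, cnt, i;

    for (idx = 0; idx < 6; idx += cnt) {
        cnt = 6 - idx < 4 ? 6 - idx : 4;
        word = 0;
        for (i = 0; i < cnt; i++)   /* pack, LSB first */
            word |= (uint32_t)msg[idx + i] << (i * 8);
        for (i = 0; i < cnt; i++)   /* unpack the same way */
            out[idx + i] = (word >> (i * 8)) & 0xFF;
        printf("word 0x%08X\n", (unsigned)word);
    }
    printf("round trip %s\n", memcmp(msg, out, 6) ? "FAILED" : "ok");
    return (0);
}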
*/ WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); UNLOCK(sc); return; } if ((status & I2C_ERROR_MASK) != 0) { if (status & I2C_INT_NOACK) sc->bus_err = IIC_ENOACK; if (status & I2C_INT_ARB_LOST) sc->bus_err = IIC_EBUSERR; if ((status & I2C_INT_TFIFO_OVR) || (status & I2C_INT_RFIFO_UNF)) sc->bus_err = IIC_EBUSERR; sc->done = 1; } else if ((status & I2C_INT_RFIFO_DATA_REQ) && (sc->msg != NULL) && (sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_rx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_TFIFO_DATA_REQ) && (sc->msg != NULL) && !(sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_tx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_RFIFO_DATA_REQ) || (status & I2C_INT_TFIFO_DATA_REQ)) { device_printf(sc->dev, "Unexpected data interrupt: 0x%08X\n", status); reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } if (status & I2C_INT_PACKET_XFER_COMPLETE) sc->done = 1; WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); if (sc->done) { WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); wakeup(&(sc->done)); } UNLOCK(sc); } static void tegra_i2c_start_msg(struct tegra_i2c_softc *sc, struct iic_msg *msg, enum tegra_i2c_xfer_type xtype) { uint32_t tmp, mask; /* Packet header. */ tmp = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | PACKET_HEADER0_PROTOCOL_I2C | (1 << PACKET_HEADER0_CONT_ID_SHIFT) | (1 << PACKET_HEADER0_PACKET_ID_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Packet size. */ WR4(sc, I2C_TX_PACKET_FIFO, msg->len - 1); /* I2C header. */ tmp = I2C_HEADER_IE_ENABLE; if (xtype == XFER_CONTINUE) tmp |= I2C_HEADER_CONTINUE_XFER; else if (xtype == XFER_REPEAT_START) tmp |= I2C_HEADER_REPEAT_START; tmp |= msg->slave << I2C_HEADER_SLAVE_ADDR_SHIFT; if (msg->flags & IIC_M_RD) { tmp |= I2C_HEADER_READ; tmp |= 1 << I2C_HEADER_SLAVE_ADDR_SHIFT; } else tmp &= ~(1 << I2C_HEADER_SLAVE_ADDR_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Interrupt mask. */ mask = I2C_INT_NOACK | I2C_INT_ARB_LOST | I2C_INT_PACKET_XFER_COMPLETE; if (msg->flags & IIC_M_RD) mask |= I2C_INT_RFIFO_DATA_REQ; else mask |= I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, mask); } static int tegra_i2c_poll(struct tegra_i2c_softc *sc) { int timeout; for (timeout = 10000; timeout > 0; timeout--) { UNLOCK(sc); tegra_i2c_intr(sc); LOCK(sc); if (sc->done != 0) break; DELAY(1); } if (timeout <= 0) return (ETIMEDOUT); return (0); } static int tegra_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { int rv, i; struct tegra_i2c_softc *sc; enum tegra_i2c_xfer_type xtype; sc = device_get_softc(dev); LOCK(sc); /* Get the bus. */ while (sc->bus_inuse == 1) SLEEP(sc, 0); sc->bus_inuse = 1; rv = 0; for (i = 0; i < nmsgs; i++) { sc->msg = &msgs[i]; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Check for valid parameters. */ if (sc->msg == NULL || sc->msg->buf == NULL || sc->msg->len == 0) { rv = EINVAL; break; } /* Get flags for next transfer.
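 * The mapping implemented below is:
 *   last message and !IIC_M_NOSTOP     -> XFER_STOP
 *   last message and IIC_M_NOSTOP      -> XFER_CONTINUE
 *   next message has IIC_M_NOSTART     -> XFER_CONTINUE
 *   otherwise                          -> XFER_REPEAT_START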
*/ if (i == (nmsgs - 1)) { if (msgs[i].flags & IIC_M_NOSTOP) xtype = XFER_CONTINUE; else xtype = XFER_STOP; } else { if (msgs[i + 1].flags & IIC_M_NOSTART) xtype = XFER_CONTINUE; else xtype = XFER_REPEAT_START; } tegra_i2c_start_msg(sc, sc->msg, xtype); if (cold) rv = tegra_i2c_poll(sc); else rv = msleep(&sc->done, &sc->mtx, PZERO, "iic", I2C_REQUEST_TIMEOUT); WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); if (rv == 0) rv = sc->bus_err; if (rv != 0) break; } if (rv != 0) { tegra_i2c_hw_init(sc); tegra_i2c_flush_fifo(sc); } sc->msg = NULL; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Wake up the processes that are waiting for the bus. */ sc->bus_inuse = 0; wakeup(sc); UNLOCK(sc); return (rv); } static int tegra_i2c_iicbus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct tegra_i2c_softc *sc; int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); LOCK(sc); tegra_i2c_setup_clk(sc, busfreq); UNLOCK(sc); return (0); } static int tegra_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_i2c_attach(device_t dev) { int rv, rid; phandle_t node; struct tegra_i2c_softc *sc; uint64_t freq; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* FDT resources. */ rv = clk_get_by_ofw_name(dev, 0, "div-clk", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "i2c", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get i2c reset\n"); rv = ENXIO; goto fail; } rv = OF_getencprop(node, "clock-frequency", &sc->bus_freq, sizeof(sc->bus_freq)); if (rv != sizeof(sc->bus_freq)) { sc->bus_freq = 100000; } /* Request maximum frequency for I2C block 136MHz (408MHz / 3). */ rv = clk_set_freq(sc->clk, 136000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock frequency\n"); goto fail; } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } sc->core_freq = (uint32_t)freq; rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. */ rv = tegra_i2c_hw_init(sc); if (rv) { device_printf(dev, "tegra_i2c_hw_init failed\n"); goto fail; } /* Setup interrupt. */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_i2c_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* Attach the iicbus. */ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(dev, "Could not allocate iicbus instance.\n"); rv = ENXIO; goto fail; } /* Probe and attach the iicbus.
*/ return (bus_generic_attach(dev)); fail: if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_i2c_detach(device_t dev) { struct tegra_i2c_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); tegra_i2c_hw_init(sc); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); if (sc->iicbus) device_delete_child(dev, sc->iicbus); - return (bus_generic_detach(dev)); + return (0); } static phandle_t tegra_i2c_get_node(device_t bus, device_t dev) { /* Share controller node with iicbus device. */ return (ofw_bus_get_node(bus)); } static device_method_t tegra_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_i2c_probe), DEVMETHOD(device_attach, tegra_i2c_attach), DEVMETHOD(device_detach, tegra_i2c_detach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, tegra_i2c_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, tegra_i2c_iicbus_reset), DEVMETHOD(iicbus_transfer, tegra_i2c_transfer), DEVMETHOD_END }; static DEFINE_CLASS_0(iichb, tegra_i2c_driver, tegra_i2c_methods, sizeof(struct tegra_i2c_softc)); EARLY_DRIVER_MODULE(tegra_iic, simplebus, tegra_i2c_driver, NULL, NULL, 73); diff --git a/sys/arm/nvidia/tegra_mc.c b/sys/arm/nvidia/tegra_mc.c index 020c9617b453..0f05c32117e9 100644 --- a/sys/arm/nvidia/tegra_mc.c +++ b/sys/arm/nvidia/tegra_mc.c @@ -1,307 +1,312 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Memory controller driver for Tegra SoCs. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #define MC_INTSTATUS 0x000 #define MC_INTMASK 0x004 #define MC_INT_DECERR_MTS (1 << 16) #define MC_INT_SECERR_SEC (1 << 13) #define MC_INT_DECERR_VPR (1 << 12) #define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) #define MC_INT_INVALID_SMMU_PAGE (1 << 10) #define MC_INT_ARBITRATION_EMEM (1 << 9) #define MC_INT_SECURITY_VIOLATION (1 << 8) #define MC_INT_DECERR_EMEM (1 << 6) #define MC_INT_INT_MASK (MC_INT_DECERR_MTS | \ MC_INT_SECERR_SEC | \ MC_INT_DECERR_VPR | \ MC_INT_INVALID_APB_ASID_UPDATE | \ MC_INT_INVALID_SMMU_PAGE | \ MC_INT_ARBITRATION_EMEM | \ MC_INT_SECURITY_VIOLATION | \ MC_INT_DECERR_EMEM) #define MC_ERR_STATUS 0x008 #define MC_ERR_TYPE(x) (((x) >> 28) & 0x7) #define MC_ERR_TYPE_DECERR_EMEM 2 #define MC_ERR_TYPE_SECURITY_TRUSTZONE 3 #define MC_ERR_TYPE_SECURITY_CARVEOUT 4 #define MC_ERR_TYPE_INVALID_SMMU_PAGE 6 #define MC_ERR_INVALID_SMMU_PAGE_READABLE (1 << 27) #define MC_ERR_INVALID_SMMU_PAGE_WRITABLE (1 << 26) #define MC_ERR_INVALID_SMMU_PAGE_NONSECURE (1 << 25) #define MC_ERR_ADR_HI(x) (((x) >> 20) & 0x3) #define MC_ERR_SWAP (1 << 18) #define MC_ERR_SECURITY (1 << 17) #define MC_ERR_RW (1 << 16) #define MC_ERR_ADR1(x) (((x) >> 12) & 0x7) #define MC_ERR_ID(x) (((x) >> 0) & 0x7F) #define MC_ERR_ADDR 0x00C #define MC_EMEM_CFG 0x050 #define MC_EMEM_ADR_CFG 0x054 #define MC_EMEM_NUMDEV(x) (((x) >> 0) & 0x1) #define MC_EMEM_ADR_CFG_DEV0 0x058 #define MC_EMEM_ADR_CFG_DEV1 0x05C #define EMEM_DEV_DEVSIZE(x) (((x) >> 16) & 0xF) #define EMEM_DEV_BANKWIDTH(x) (((x) >> 8) & 0x3) #define EMEM_DEV_COLWIDTH(x) (((x) >> 8) & 0x3) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) mtx_sleep(sc, &sc->mtx, 0, "tegra_mc", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_mc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-mc", 1}, {"nvidia,tegra210-mc", 1}, {NULL, 0} }; struct tegra_mc_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; clk_t clk; }; static char *smmu_err_tbl[16] = { "reserved", /* 0 */ "reserved", /* 1 */ "DRAM decode", /* 2 */ "Trustzone Security", /* 3 */ "Security carveout", /* 4 */ "reserved", /* 5 */ "Invalid SMMU page", /* 6 */ "reserved", /* 7 */ }; static void tegra_mc_intr(void *arg) { struct tegra_mc_softc *sc; uint32_t stat, err; uint64_t addr; sc = (struct tegra_mc_softc *)arg; stat = RD4(sc, MC_INTSTATUS); if ((stat & MC_INT_INT_MASK) == 0) { WR4(sc, MC_INTSTATUS, stat); return; }
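/*
 * Decode and log every asserted cause. For the decode/translation
 * errors the faulting address is split across two registers: the low
 * 32 bits are in MC_ERR_ADDR and bits 33:32 in the MC_ERR_ADR_HI
 * field of MC_ERR_STATUS, so the value printed below is reassembled
 * from both.
 */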
device_printf(sc->dev, "Memory Controller Interrupt:\n"); if (stat & MC_INT_DECERR_MTS) printf(" - MTS carveout violation\n"); if (stat & MC_INT_SECERR_SEC) printf(" - SEC carveout violation\n"); if (stat & MC_INT_DECERR_VPR) printf(" - VPR requirements violated\n"); if (stat & MC_INT_INVALID_APB_ASID_UPDATE) printf(" - invalid APB ASID update\n"); if (stat & MC_INT_INVALID_SMMU_PAGE) printf(" - SMMU address translation error\n"); if (stat & MC_INT_ARBITRATION_EMEM) printf(" - arbitration deadlock-prevention threshold hit\n"); if (stat & MC_INT_SECURITY_VIOLATION) printf(" - SMMU address translation security error\n"); if (stat & MC_INT_DECERR_EMEM) printf(" - SMMU address decode error\n"); if ((stat & (MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM)) != 0) { err = RD4(sc, MC_ERR_STATUS); addr = RD4(sc, MC_ERR_ADDR); addr |= (uint64_t)(MC_ERR_ADR_HI(err)) << 32; printf(" at 0x%012jX [%s %s %s] - %s error.\n", (uintmax_t)addr, err & MC_ERR_SWAP ? "Swap, " : "", err & MC_ERR_SECURITY ? "Sec, " : "", err & MC_ERR_RW ? "Write" : "Read", smmu_err_tbl[MC_ERR_TYPE(err)]); } WR4(sc, MC_INTSTATUS, stat); } static void tegra_mc_init_hw(struct tegra_mc_softc *sc) { /* Disable and acknowledge all interrupts */ WR4(sc, MC_INTMASK, 0); WR4(sc, MC_INTSTATUS, MC_INT_INT_MASK); } static int tegra_mc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra Memory Controller"); return (BUS_PROBE_DEFAULT); } static int tegra_mc_attach(device_t dev) { int rv, rid; struct tegra_mc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* OFW resources. */ rv = clk_get_by_ofw_name(dev, 0, "mc", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get mc clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware.
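 * Mask all memory controller interrupts and acknowledge anything left
 * pending by the bootloader before the handler is hooked up below.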
*/ tegra_mc_init_hw(sc); /* Setup interrupt */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_mc_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* Enable Interrupts */ WR4(sc, MC_INTMASK, MC_INT_INT_MASK); return (bus_generic_attach(dev)); fail: if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_mc_detach(device_t dev) { struct tegra_mc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_mc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_mc_probe), DEVMETHOD(device_attach, tegra_mc_attach), DEVMETHOD(device_detach, tegra_mc_detach), DEVMETHOD_END }; static DEFINE_CLASS_0(mc, tegra_mc_driver, tegra_mc_methods, sizeof(struct tegra_mc_softc)); DRIVER_MODULE(tegra_mc, simplebus, tegra_mc_driver, NULL, NULL); diff --git a/sys/arm/nvidia/tegra_rtc.c b/sys/arm/nvidia/tegra_rtc.c index 07bf51628665..09bd42bc64e3 100644 --- a/sys/arm/nvidia/tegra_rtc.c +++ b/sys/arm/nvidia/tegra_rtc.c @@ -1,297 +1,302 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * RTC driver for Tegra SoCs. 
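 *
 * Reading RTC_MILLI_SECONDS latches the current seconds counter into
 * RTC_SHADOW_SECONDS (per the Tegra TRM), which is why
 * tegra_rtc_gettime() reads the millisecond register first and the
 * shadow seconds register second to get a coherent pair.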
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #define RTC_CONTROL 0x00 #define RTC_BUSY 0x04 #define RTC_BUSY_STATUS (1 << 0) #define RTC_SECONDS 0x08 #define RTC_SHADOW_SECONDS 0x0c #define RTC_MILLI_SECONDS 0x10 #define RTC_SECONDS_ALARM0 0x14 #define RTC_SECONDS_ALARM1 0x18 #define RTC_MILLI_SECONDS_ALARM 0x1c #define RTC_SECONDS_COUNTDOWN_ALARM 0x20 #define RTC_MILLI_SECONDS_COUNTDOWN_ALARM 0x24 #define RTC_INTR_MASK 0x28 #define RTC_INTR_MSEC_CDN_ALARM (1 << 4) #define RTC_INTR_SEC_CDN_ALARM (1 << 3) #define RTC_INTR_MSEC_ALARM (1 << 2) #define RTC_INTR_SEC_ALARM1 (1 << 1) #define RTC_INTR_SEC_ALARM0 (1 << 0) #define RTC_INTR_STATUS 0x2c #define RTC_INTR_SOURCE 0x30 #define RTC_INTR_SET 0x34 #define RTC_CORRECTION_FACTOR 0x38 #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "rtcwait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_rtc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-rtc", 1}, {NULL, 0} }; struct tegra_rtc_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; clk_t clk; uint32_t core_freq; }; static void tegra_rtc_wait(struct tegra_rtc_softc *sc) { int timeout; for (timeout = 500; timeout > 0; timeout--) { if ((RD4(sc, RTC_BUSY) & RTC_BUSY_STATUS) == 0) break; DELAY(1); } if (timeout <= 0) device_printf(sc->dev, "Device busy timed out\n"); } /* * Get the time of day clock and return it in ts. * Return 0 on success, an error number otherwise. */ static int tegra_rtc_gettime(device_t dev, struct timespec *ts) { struct tegra_rtc_softc *sc; struct timeval tv; uint32_t msec, sec; sc = device_get_softc(dev); LOCK(sc); msec = RD4(sc, RTC_MILLI_SECONDS); sec = RD4(sc, RTC_SHADOW_SECONDS); UNLOCK(sc); tv.tv_sec = sec; tv.tv_usec = msec * 1000; TIMEVAL_TO_TIMESPEC(&tv, ts); return (0); } static int tegra_rtc_settime(device_t dev, struct timespec *ts) { struct tegra_rtc_softc *sc; struct timeval tv; sc = device_get_softc(dev); LOCK(sc); TIMESPEC_TO_TIMEVAL(&tv, ts); tegra_rtc_wait(sc); WR4(sc, RTC_SECONDS, tv.tv_sec); UNLOCK(sc); return (0); } static void tegra_rtc_intr(void *arg) { struct tegra_rtc_softc *sc; uint32_t status; sc = (struct tegra_rtc_softc *)arg; LOCK(sc); status = RD4(sc, RTC_INTR_STATUS); WR4(sc, RTC_INTR_STATUS, status); UNLOCK(sc); } static int tegra_rtc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_rtc_attach(device_t dev) { int rv, rid; struct tegra_rtc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource.
*/ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* OFW resources. */ rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get rtc clock: %d\n", rv); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. */ WR4(sc, RTC_SECONDS_ALARM0, 0); WR4(sc, RTC_SECONDS_ALARM1, 0); WR4(sc, RTC_INTR_STATUS, 0xFFFFFFFF); WR4(sc, RTC_INTR_MASK, 0); /* Setup interrupt */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_rtc_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* * Register as a time of day clock with 1-second resolution. * * XXXX Not yet, we don't have support for multiple RTCs */ /* clock_register(dev, 1000000); */ return (bus_generic_attach(dev)); fail: if (sc->clk != NULL) clk_release(sc->clk); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_rtc_detach(device_t dev) { struct tegra_rtc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static device_method_t tegra_rtc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_rtc_probe), DEVMETHOD(device_attach, tegra_rtc_attach), DEVMETHOD(device_detach, tegra_rtc_detach), /* clock interface */ DEVMETHOD(clock_gettime, tegra_rtc_gettime), DEVMETHOD(clock_settime, tegra_rtc_settime), DEVMETHOD_END }; static DEFINE_CLASS_0(rtc, tegra_rtc_driver, tegra_rtc_methods, sizeof(struct tegra_rtc_softc)); DRIVER_MODULE(tegra_rtc, simplebus, tegra_rtc_driver, NULL, NULL); diff --git a/sys/arm/ti/ti_adc.c b/sys/arm/ti/ti_adc.c index 5a3a337afff5..d13dd87001de 100644 --- a/sys/arm/ti/ti_adc.c +++ b/sys/arm/ti/ti_adc.c @@ -1,964 +1,969 @@ /*- * Copyright 2014 Luiz Otavio O Souza * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_evdev.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EVDEV_SUPPORT #include #include #endif #include #include #include #undef DEBUG_TSC #define DEFAULT_CHARGE_DELAY 0x400 #define STEPDLY_OPEN 0x98 #define ORDER_XP 0 #define ORDER_XN 1 #define ORDER_YP 2 #define ORDER_YN 3 /* Define our 8 steps, one for each input channel. */ static struct ti_adc_input ti_adc_inputs[TI_ADC_NPINS] = { { .stepconfig = ADC_STEPCFG(1), .stepdelay = ADC_STEPDLY(1) }, { .stepconfig = ADC_STEPCFG(2), .stepdelay = ADC_STEPDLY(2) }, { .stepconfig = ADC_STEPCFG(3), .stepdelay = ADC_STEPDLY(3) }, { .stepconfig = ADC_STEPCFG(4), .stepdelay = ADC_STEPDLY(4) }, { .stepconfig = ADC_STEPCFG(5), .stepdelay = ADC_STEPDLY(5) }, { .stepconfig = ADC_STEPCFG(6), .stepdelay = ADC_STEPDLY(6) }, { .stepconfig = ADC_STEPCFG(7), .stepdelay = ADC_STEPDLY(7) }, { .stepconfig = ADC_STEPCFG(8), .stepdelay = ADC_STEPDLY(8) }, }; static int ti_adc_samples[5] = { 0, 2, 4, 8, 16 }; static int ti_adc_detach(device_t dev); #ifdef EVDEV_SUPPORT static void ti_adc_ev_report(struct ti_adc_softc *sc) { evdev_push_event(sc->sc_evdev, EV_ABS, ABS_X, sc->sc_x); evdev_push_event(sc->sc_evdev, EV_ABS, ABS_Y, sc->sc_y); evdev_push_event(sc->sc_evdev, EV_KEY, BTN_TOUCH, sc->sc_pen_down); evdev_sync(sc->sc_evdev); } #endif /* EVDEV */ static void ti_adc_enable(struct ti_adc_softc *sc) { uint32_t reg; TI_ADC_LOCK_ASSERT(sc); if (sc->sc_last_state == 1) return; /* Enable the FIFO0 threshold and the end of sequence interrupt. */ ADC_WRITE4(sc, ADC_IRQENABLE_SET, ADC_IRQ_FIFO0_THRES | ADC_IRQ_FIFO1_THRES | ADC_IRQ_END_OF_SEQ); reg = ADC_CTRL_STEP_WP | ADC_CTRL_STEP_ID; if (sc->sc_tsc_wires > 0) { reg |= ADC_CTRL_TSC_ENABLE; switch (sc->sc_tsc_wires) { case 4: reg |= ADC_CTRL_TSC_4WIRE; break; case 5: reg |= ADC_CTRL_TSC_5WIRE; break; case 8: reg |= ADC_CTRL_TSC_8WIRE; break; default: break; } } reg |= ADC_CTRL_ENABLE; /* Enable the ADC. Run thru enabled steps, start the conversions. */ ADC_WRITE4(sc, ADC_CTRL, reg); sc->sc_last_state = 1; } static void ti_adc_disable(struct ti_adc_softc *sc) { int count; TI_ADC_LOCK_ASSERT(sc); if (sc->sc_last_state == 0) return; /* Disable all the enabled steps. */ ADC_WRITE4(sc, ADC_STEPENABLE, 0); /* Disable the ADC. */ ADC_WRITE4(sc, ADC_CTRL, ADC_READ4(sc, ADC_CTRL) & ~ADC_CTRL_ENABLE); /* Disable the FIFO0 threshold and the end of sequence interrupt. */ ADC_WRITE4(sc, ADC_IRQENABLE_CLR, ADC_IRQ_FIFO0_THRES | ADC_IRQ_FIFO1_THRES | ADC_IRQ_END_OF_SEQ); /* ACK any pending interrupt. */ ADC_WRITE4(sc, ADC_IRQSTATUS, ADC_READ4(sc, ADC_IRQSTATUS)); /* Drain the FIFO data. 
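 * Each read of ADC_FIFOnDATA pops one entry, so keep reading until
 * both FIFO counters drop to zero.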
*/ count = ADC_READ4(sc, ADC_FIFO0COUNT) & ADC_FIFO_COUNT_MSK; while (count > 0) { (void)ADC_READ4(sc, ADC_FIFO0DATA); count = ADC_READ4(sc, ADC_FIFO0COUNT) & ADC_FIFO_COUNT_MSK; } count = ADC_READ4(sc, ADC_FIFO1COUNT) & ADC_FIFO_COUNT_MSK; while (count > 0) { (void)ADC_READ4(sc, ADC_FIFO1DATA); count = ADC_READ4(sc, ADC_FIFO1COUNT) & ADC_FIFO_COUNT_MSK; } sc->sc_last_state = 0; } static int ti_adc_setup(struct ti_adc_softc *sc) { int ain, i; uint32_t enabled; TI_ADC_LOCK_ASSERT(sc); /* Check for enabled inputs. */ enabled = sc->sc_tsc_enabled; for (i = 0; i < sc->sc_adc_nchannels; i++) { ain = sc->sc_adc_channels[i]; if (ti_adc_inputs[ain].enable) enabled |= (1U << (ain + 1)); } /* Set the ADC global status. */ if (enabled != 0) { ti_adc_enable(sc); /* Update the enabled steps. */ if (enabled != ADC_READ4(sc, ADC_STEPENABLE)) ADC_WRITE4(sc, ADC_STEPENABLE, enabled); } else ti_adc_disable(sc); return (0); } static void ti_adc_input_setup(struct ti_adc_softc *sc, int32_t ain) { struct ti_adc_input *input; uint32_t reg, val; TI_ADC_LOCK_ASSERT(sc); input = &ti_adc_inputs[ain]; reg = input->stepconfig; val = ADC_READ4(sc, reg); /* Set single ended operation. */ val &= ~ADC_STEP_DIFF_CNTRL; /* Set the negative voltage reference. */ val &= ~ADC_STEP_RFM_MSK; /* Set the positive voltage reference. */ val &= ~ADC_STEP_RFP_MSK; /* Set the samples average. */ val &= ~ADC_STEP_AVG_MSK; val |= input->samples << ADC_STEP_AVG_SHIFT; /* Select the desired input. */ val &= ~ADC_STEP_INP_MSK; val |= ain << ADC_STEP_INP_SHIFT; /* Set the ADC to one-shot mode. */ val &= ~ADC_STEP_MODE_MSK; ADC_WRITE4(sc, reg, val); } static void ti_adc_reset(struct ti_adc_softc *sc) { int ain, i; TI_ADC_LOCK_ASSERT(sc); /* Disable all the inputs. */ for (i = 0; i < sc->sc_adc_nchannels; i++) { ain = sc->sc_adc_channels[i]; ti_adc_inputs[ain].enable = 0; } } static int ti_adc_clockdiv_proc(SYSCTL_HANDLER_ARGS) { int error, reg; struct ti_adc_softc *sc; sc = (struct ti_adc_softc *)arg1; TI_ADC_LOCK(sc); reg = (int)ADC_READ4(sc, ADC_CLKDIV) + 1; TI_ADC_UNLOCK(sc); error = sysctl_handle_int(oidp, &reg, sizeof(reg), req); if (error != 0 || req->newptr == NULL) return (error); /* * The actual written value is the prescaler setting - 1. * Enforce a minimum value of 10 (i.e. 9) which limits the maximum * ADC clock to ~2.4MHz (CLK_M_OSC / 10). */ reg--; if (reg < 9) reg = 9; if (reg > USHRT_MAX) reg = USHRT_MAX; TI_ADC_LOCK(sc); /* Disable the ADC. */ ti_adc_disable(sc); /* Update the ADC prescaler setting. */ ADC_WRITE4(sc, ADC_CLKDIV, reg); /* Enable the ADC again. */ ti_adc_setup(sc); TI_ADC_UNLOCK(sc); return (0); } static int ti_adc_enable_proc(SYSCTL_HANDLER_ARGS) { int error; int32_t enable; struct ti_adc_softc *sc; struct ti_adc_input *input; input = (struct ti_adc_input *)arg1; sc = input->sc; enable = input->enable; error = sysctl_handle_int(oidp, &enable, sizeof(enable), req); if (error != 0 || req->newptr == NULL) return (error); if (enable) enable = 1; TI_ADC_LOCK(sc); /* Setup the ADC as needed.
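 * Toggling an input recomputes the global STEPENABLE mask in
 * ti_adc_setup(), which also powers the converter up when the first
 * input is enabled and back down when the last one is disabled.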
*/ if (input->enable != enable) { input->enable = enable; ti_adc_setup(sc); if (input->enable == 0) input->value = 0; } TI_ADC_UNLOCK(sc); return (0); } static int ti_adc_open_delay_proc(SYSCTL_HANDLER_ARGS) { int error, reg; struct ti_adc_softc *sc; struct ti_adc_input *input; input = (struct ti_adc_input *)arg1; sc = input->sc; TI_ADC_LOCK(sc); reg = (int)ADC_READ4(sc, input->stepdelay) & ADC_STEP_OPEN_DELAY; TI_ADC_UNLOCK(sc); error = sysctl_handle_int(oidp, &reg, sizeof(reg), req); if (error != 0 || req->newptr == NULL) return (error); if (reg < 0) reg = 0; TI_ADC_LOCK(sc); ADC_WRITE4(sc, input->stepdelay, reg & ADC_STEP_OPEN_DELAY); TI_ADC_UNLOCK(sc); return (0); } static int ti_adc_samples_avg_proc(SYSCTL_HANDLER_ARGS) { int error, samples, i; struct ti_adc_softc *sc; struct ti_adc_input *input; input = (struct ti_adc_input *)arg1; sc = input->sc; if (input->samples >= nitems(ti_adc_samples)) input->samples = nitems(ti_adc_samples) - 1; samples = ti_adc_samples[input->samples]; error = sysctl_handle_int(oidp, &samples, 0, req); if (error != 0 || req->newptr == NULL) return (error); TI_ADC_LOCK(sc); if (samples != ti_adc_samples[input->samples]) { input->samples = 0; for (i = 0; i < nitems(ti_adc_samples); i++) if (samples >= ti_adc_samples[i]) input->samples = i; ti_adc_input_setup(sc, input->input); } TI_ADC_UNLOCK(sc); return (error); } static void ti_adc_read_data(struct ti_adc_softc *sc) { int count, ain; struct ti_adc_input *input; uint32_t data; TI_ADC_LOCK_ASSERT(sc); /* Read the available data. */ count = ADC_READ4(sc, ADC_FIFO0COUNT) & ADC_FIFO_COUNT_MSK; while (count > 0) { data = ADC_READ4(sc, ADC_FIFO0DATA); ain = (data & ADC_FIFO_STEP_ID_MSK) >> ADC_FIFO_STEP_ID_SHIFT; input = &ti_adc_inputs[ain]; if (input->enable == 0) input->value = 0; else input->value = (int32_t)(data & ADC_FIFO_DATA_MSK); count = ADC_READ4(sc, ADC_FIFO0COUNT) & ADC_FIFO_COUNT_MSK; } } static int cmp_values(const void *a, const void *b) { const uint32_t *v1, *v2; v1 = a; v2 = b; if (*v1 < *v2) return (-1); if (*v1 > *v2) return (1); return (0); } static void ti_adc_tsc_read_data(struct ti_adc_softc *sc) { int count; uint32_t data[16]; uint32_t x, y; int i, start, end; TI_ADC_LOCK_ASSERT(sc); /* Read the available data. */ count = ADC_READ4(sc, ADC_FIFO1COUNT) & ADC_FIFO_COUNT_MSK; if (count == 0) return; i = 0; while (count > 0) { data[i++] = ADC_READ4(sc, ADC_FIFO1DATA) & ADC_FIFO_DATA_MSK; count = ADC_READ4(sc, ADC_FIFO1COUNT) & ADC_FIFO_COUNT_MSK; } if (sc->sc_coord_readouts > 3) { start = 1; end = sc->sc_coord_readouts - 1; qsort(data, sc->sc_coord_readouts, sizeof(data[0]), &cmp_values); qsort(&data[sc->sc_coord_readouts + 2], sc->sc_coord_readouts, sizeof(data[0]), &cmp_values); } else { start = 0; end = sc->sc_coord_readouts; } x = y = 0; for (i = start; i < end; i++) y += data[i]; y /= (end - start); for (i = sc->sc_coord_readouts + 2 + start; i < sc->sc_coord_readouts + 2 + end; i++) x += data[i]; x /= (end - start); #ifdef DEBUG_TSC device_printf(sc->sc_dev, "touchscreen x: %u, y: %u\n", x, y); #endif #ifdef EVDEV_SUPPORT if ((sc->sc_x != x) || (sc->sc_y != y)) { sc->sc_x = x; sc->sc_y = y; ti_adc_ev_report(sc); } #endif } static void ti_adc_intr_locked(struct ti_adc_softc *sc, uint32_t status) { /* Read the available data. */ if (status & ADC_IRQ_FIFO0_THRES) ti_adc_read_data(sc); } static void ti_adc_tsc_intr_locked(struct ti_adc_softc *sc, uint32_t status) { /* Read the available data.
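 * FIFO1 is dedicated to the touchscreen coordinate steps; the
 * general-purpose channels are drained from FIFO0 by
 * ti_adc_intr_locked() above.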
*/ if (status & ADC_IRQ_FIFO1_THRES) ti_adc_tsc_read_data(sc); } static void ti_adc_intr(void *arg) { struct ti_adc_softc *sc; uint32_t status, rawstatus; sc = (struct ti_adc_softc *)arg; TI_ADC_LOCK(sc); rawstatus = ADC_READ4(sc, ADC_IRQSTATUS_RAW); status = ADC_READ4(sc, ADC_IRQSTATUS); if (rawstatus & ADC_IRQ_HW_PEN_ASYNC) { sc->sc_pen_down = 1; status |= ADC_IRQ_HW_PEN_ASYNC; ADC_WRITE4(sc, ADC_IRQENABLE_CLR, ADC_IRQ_HW_PEN_ASYNC); #ifdef EVDEV_SUPPORT ti_adc_ev_report(sc); #endif } if (rawstatus & ADC_IRQ_PEN_UP) { sc->sc_pen_down = 0; status |= ADC_IRQ_PEN_UP; #ifdef EVDEV_SUPPORT ti_adc_ev_report(sc); #endif } if (status & ADC_IRQ_FIFO0_THRES) ti_adc_intr_locked(sc, status); if (status & ADC_IRQ_FIFO1_THRES) ti_adc_tsc_intr_locked(sc, status); if (status) { /* ACK the interrupt. */ ADC_WRITE4(sc, ADC_IRQSTATUS, status); } /* Start the next conversion ? */ if (status & ADC_IRQ_END_OF_SEQ) ti_adc_setup(sc); TI_ADC_UNLOCK(sc); } static void ti_adc_sysctl_init(struct ti_adc_softc *sc) { char pinbuf[3]; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree_node, *inp_node, *inpN_node; struct sysctl_oid_list *tree, *inp_tree, *inpN_tree; int ain, i; /* * Add per-pin sysctl tree/handlers. */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree_node = device_get_sysctl_tree(sc->sc_dev); tree = SYSCTL_CHILDREN(tree_node); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "clockdiv", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, sc, 0, ti_adc_clockdiv_proc, "IU", "ADC clock prescaler"); inp_node = SYSCTL_ADD_NODE(ctx, tree, OID_AUTO, "ain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ADC inputs"); inp_tree = SYSCTL_CHILDREN(inp_node); for (i = 0; i < sc->sc_adc_nchannels; i++) { ain = sc->sc_adc_channels[i]; snprintf(pinbuf, sizeof(pinbuf), "%d", ain); inpN_node = SYSCTL_ADD_NODE(ctx, inp_tree, OID_AUTO, pinbuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ADC input"); inpN_tree = SYSCTL_CHILDREN(inpN_node); SYSCTL_ADD_PROC(ctx, inpN_tree, OID_AUTO, "enable", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, &ti_adc_inputs[ain], 0, ti_adc_enable_proc, "IU", "Enable ADC input"); SYSCTL_ADD_PROC(ctx, inpN_tree, OID_AUTO, "open_delay", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, &ti_adc_inputs[ain], 0, ti_adc_open_delay_proc, "IU", "ADC open delay"); SYSCTL_ADD_PROC(ctx, inpN_tree, OID_AUTO, "samples_avg", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, &ti_adc_inputs[ain], 0, ti_adc_samples_avg_proc, "IU", "ADC samples average"); SYSCTL_ADD_INT(ctx, inpN_tree, OID_AUTO, "input", CTLFLAG_RD, &ti_adc_inputs[ain].value, 0, "Converted raw value for the ADC input"); } } static void ti_adc_inputs_init(struct ti_adc_softc *sc) { int ain, i; struct ti_adc_input *input; TI_ADC_LOCK(sc); for (i = 0; i < sc->sc_adc_nchannels; i++) { ain = sc->sc_adc_channels[i]; input = &ti_adc_inputs[ain]; input->sc = sc; input->input = ain; input->value = 0; input->enable = 0; input->samples = 0; ti_adc_input_setup(sc, ain); } TI_ADC_UNLOCK(sc); } static void ti_adc_tsc_init(struct ti_adc_softc *sc) { int i, start_step, end_step; uint32_t stepconfig, val; TI_ADC_LOCK(sc); /* X coordinates */ stepconfig = ADC_STEP_FIFO1 | (4 << ADC_STEP_AVG_SHIFT) | ADC_STEP_MODE_HW_ONESHOT | sc->sc_xp_bit; if (sc->sc_tsc_wires == 4) stepconfig |= ADC_STEP_INP(sc->sc_yp_inp) | sc->sc_xn_bit; else if (sc->sc_tsc_wires == 5) stepconfig |= ADC_STEP_INP(4) | sc->sc_xn_bit | sc->sc_yn_bit | sc->sc_yp_bit; else if (sc->sc_tsc_wires == 8) stepconfig |= ADC_STEP_INP(sc->sc_yp_inp) | sc->sc_xn_bit; start_step = ADC_STEPS - sc->sc_coord_readouts + 1; end_step = start_step + 
sc->sc_coord_readouts - 1; for (i = start_step; i <= end_step; i++) { ADC_WRITE4(sc, ADC_STEPCFG(i), stepconfig); ADC_WRITE4(sc, ADC_STEPDLY(i), STEPDLY_OPEN); } /* Y coordinates */ stepconfig = ADC_STEP_FIFO1 | (4 << ADC_STEP_AVG_SHIFT) | ADC_STEP_MODE_HW_ONESHOT | sc->sc_yn_bit | ADC_STEP_INM(8); if (sc->sc_tsc_wires == 4) stepconfig |= ADC_STEP_INP(sc->sc_xp_inp) | sc->sc_yp_bit; else if (sc->sc_tsc_wires == 5) stepconfig |= ADC_STEP_INP(4) | sc->sc_xp_bit | sc->sc_xn_bit | sc->sc_yp_bit; else if (sc->sc_tsc_wires == 8) stepconfig |= ADC_STEP_INP(sc->sc_xp_inp) | sc->sc_yp_bit; start_step = ADC_STEPS - (sc->sc_coord_readouts*2 + 2) + 1; end_step = start_step + sc->sc_coord_readouts - 1; for (i = start_step; i <= end_step; i++) { ADC_WRITE4(sc, ADC_STEPCFG(i), stepconfig); ADC_WRITE4(sc, ADC_STEPDLY(i), STEPDLY_OPEN); } /* Charge config */ val = ADC_READ4(sc, ADC_IDLECONFIG); ADC_WRITE4(sc, ADC_TC_CHARGE_STEPCONFIG, val); ADC_WRITE4(sc, ADC_TC_CHARGE_DELAY, sc->sc_charge_delay); /* 2 steps for Z */ start_step = ADC_STEPS - (sc->sc_coord_readouts + 2) + 1; stepconfig = ADC_STEP_FIFO1 | (4 << ADC_STEP_AVG_SHIFT) | ADC_STEP_MODE_HW_ONESHOT | sc->sc_yp_bit | sc->sc_xn_bit | ADC_STEP_INP(sc->sc_xp_inp) | ADC_STEP_INM(8); ADC_WRITE4(sc, ADC_STEPCFG(start_step), stepconfig); ADC_WRITE4(sc, ADC_STEPDLY(start_step), STEPDLY_OPEN); start_step++; stepconfig |= ADC_STEP_INP(sc->sc_yn_inp); ADC_WRITE4(sc, ADC_STEPCFG(start_step), stepconfig); ADC_WRITE4(sc, ADC_STEPDLY(start_step), STEPDLY_OPEN); ADC_WRITE4(sc, ADC_FIFO1THRESHOLD, (sc->sc_coord_readouts*2 + 2) - 1); sc->sc_tsc_enabled = 1; start_step = ADC_STEPS - (sc->sc_coord_readouts*2 + 2) + 1; end_step = ADC_STEPS; for (i = start_step; i <= end_step; i++) { sc->sc_tsc_enabled |= (1 << i); } TI_ADC_UNLOCK(sc); } static void ti_adc_idlestep_init(struct ti_adc_softc *sc) { uint32_t val; val = ADC_STEP_YNN_SW | ADC_STEP_INM(8) | ADC_STEP_INP(8) | ADC_STEP_YPN_SW; ADC_WRITE4(sc, ADC_IDLECONFIG, val); } static int ti_adc_config_wires(struct ti_adc_softc *sc, int *wire_configs, int nwire_configs) { int i; int wire, ai; for (i = 0; i < nwire_configs; i++) { wire = wire_configs[i] & 0xf; ai = (wire_configs[i] >> 4) & 0xf; switch (wire) { case ORDER_XP: sc->sc_xp_bit = ADC_STEP_XPP_SW; sc->sc_xp_inp = ai; break; case ORDER_XN: sc->sc_xn_bit = ADC_STEP_XNN_SW; sc->sc_xn_inp = ai; break; case ORDER_YP: sc->sc_yp_bit = ADC_STEP_YPP_SW; sc->sc_yp_inp = ai; break; case ORDER_YN: sc->sc_yn_bit = ADC_STEP_YNN_SW; sc->sc_yn_inp = ai; break; default: device_printf(sc->sc_dev, "Invalid wire config\n"); return (-1); } } return (0); } static int ti_adc_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "ti,am3359-tscadc")) return (ENXIO); device_set_desc(dev, "TI ADC controller"); return (BUS_PROBE_DEFAULT); } static int ti_adc_attach(device_t dev) { int err, rid, i; struct ti_adc_softc *sc; uint32_t rev, reg; phandle_t node, child; pcell_t cell; int *channels; int nwire_configs; int *wire_configs; sc = device_get_softc(dev); sc->sc_dev = dev; node = ofw_bus_get_node(dev); sc->sc_tsc_wires = 0; sc->sc_coord_readouts = 1; sc->sc_x_plate_resistance = 0; sc->sc_charge_delay = DEFAULT_CHARGE_DELAY; /* Read "tsc" node properties */ child = ofw_bus_find_child(node, "tsc"); if (child != 0 && OF_hasprop(child, "ti,wires")) { if ((OF_getencprop(child, "ti,wires", &cell, sizeof(cell))) > 0) sc->sc_tsc_wires = cell; if ((OF_getencprop(child, "ti,coordinate-readouts", &cell, sizeof(cell))) > 0) sc->sc_coord_readouts = cell; if ((OF_getencprop(child, "ti,x-plate-resistance", 
&cell, sizeof(cell))) > 0) sc->sc_x_plate_resistance = cell; if ((OF_getencprop(child, "ti,charge-delay", &cell, sizeof(cell))) > 0) sc->sc_charge_delay = cell; nwire_configs = OF_getencprop_alloc_multi(child, "ti,wire-config", sizeof(*wire_configs), (void **)&wire_configs); if (nwire_configs != sc->sc_tsc_wires) { device_printf(sc->sc_dev, "invalid number of ti,wire-config: %d (should be %d)\n", nwire_configs, sc->sc_tsc_wires); OF_prop_free(wire_configs); return (EINVAL); } err = ti_adc_config_wires(sc, wire_configs, nwire_configs); OF_prop_free(wire_configs); if (err) return (EINVAL); } /* Read "adc" node properties */ child = ofw_bus_find_child(node, "adc"); if (child != 0) { sc->sc_adc_nchannels = OF_getencprop_alloc_multi(child, "ti,adc-channels", sizeof(*channels), (void **)&channels); if (sc->sc_adc_nchannels > 0) { for (i = 0; i < sc->sc_adc_nchannels; i++) sc->sc_adc_channels[i] = channels[i]; OF_prop_free(channels); } } /* Sanity check FDT data */ if (sc->sc_tsc_wires + sc->sc_adc_nchannels > TI_ADC_NPINS) { device_printf(dev, "total number of channels (%d) is larger than %d\n", sc->sc_tsc_wires + sc->sc_adc_nchannels, TI_ADC_NPINS); return (ENXIO); } rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } /* Activate the ADC_TSC module. */ err = ti_sysc_clock_enable(device_get_parent(dev)); if (err) return (err); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ti_adc_intr, sc, &sc->sc_intrhand) != 0) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "Unable to setup the irq handler.\n"); return (ENXIO); } /* Check the ADC revision. */ rev = ADC_READ4(sc, ti_sysc_get_rev_address_offset_host(device_get_parent(dev))); device_printf(dev, "scheme: %#x func: %#x rtl: %d rev: %d.%d custom rev: %d\n", (rev & ADC_REV_SCHEME_MSK) >> ADC_REV_SCHEME_SHIFT, (rev & ADC_REV_FUNC_MSK) >> ADC_REV_FUNC_SHIFT, (rev & ADC_REV_RTL_MSK) >> ADC_REV_RTL_SHIFT, (rev & ADC_REV_MAJOR_MSK) >> ADC_REV_MAJOR_SHIFT, rev & ADC_REV_MINOR_MSK, (rev & ADC_REV_CUSTOM_MSK) >> ADC_REV_CUSTOM_SHIFT); reg = ADC_READ4(sc, ADC_CTRL); ADC_WRITE4(sc, ADC_CTRL, reg | ADC_CTRL_STEP_WP | ADC_CTRL_STEP_ID); /* * Set the ADC prescaler to 2400 if touchscreen is not enabled * and to 24 if it is. This sets the ADC clock to ~10Khz and * ~1Mhz respectively (CLK_M_OSC / prescaler). 
*/ if (sc->sc_tsc_wires) ADC_WRITE4(sc, ADC_CLKDIV, 24 - 1); else ADC_WRITE4(sc, ADC_CLKDIV, 2400 - 1); TI_ADC_LOCK_INIT(sc); ti_adc_idlestep_init(sc); ti_adc_inputs_init(sc); ti_adc_sysctl_init(sc); ti_adc_tsc_init(sc); TI_ADC_LOCK(sc); ti_adc_setup(sc); TI_ADC_UNLOCK(sc); #ifdef EVDEV_SUPPORT if (sc->sc_tsc_wires > 0) { sc->sc_evdev = evdev_alloc(); evdev_set_name(sc->sc_evdev, device_get_desc(dev)); evdev_set_phys(sc->sc_evdev, device_get_nameunit(dev)); evdev_set_id(sc->sc_evdev, BUS_VIRTUAL, 0, 0, 0); evdev_support_prop(sc->sc_evdev, INPUT_PROP_DIRECT); evdev_support_event(sc->sc_evdev, EV_SYN); evdev_support_event(sc->sc_evdev, EV_ABS); evdev_support_event(sc->sc_evdev, EV_KEY); evdev_support_abs(sc->sc_evdev, ABS_X, 0, ADC_MAX_VALUE, 0, 0, 0); evdev_support_abs(sc->sc_evdev, ABS_Y, 0, ADC_MAX_VALUE, 0, 0, 0); evdev_support_key(sc->sc_evdev, BTN_TOUCH); err = evdev_register(sc->sc_evdev); if (err) { device_printf(dev, "failed to register evdev: error=%d\n", err); ti_adc_detach(dev); return (err); } sc->sc_pen_down = 0; sc->sc_x = -1; sc->sc_y = -1; } #endif /* EVDEV */ return (0); } static int ti_adc_detach(device_t dev) { struct ti_adc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); /* Turn off the ADC. */ TI_ADC_LOCK(sc); ti_adc_reset(sc); ti_adc_setup(sc); #ifdef EVDEV_SUPPORT evdev_free(sc->sc_evdev); #endif TI_ADC_UNLOCK(sc); TI_ADC_LOCK_DESTROY(sc); if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); - return (bus_generic_detach(dev)); + return (0); } static device_method_t ti_adc_methods[] = { DEVMETHOD(device_probe, ti_adc_probe), DEVMETHOD(device_attach, ti_adc_attach), DEVMETHOD(device_detach, ti_adc_detach), DEVMETHOD_END }; static driver_t ti_adc_driver = { "ti_adc", ti_adc_methods, sizeof(struct ti_adc_softc), }; DRIVER_MODULE(ti_adc, simplebus, ti_adc_driver, 0, 0); MODULE_VERSION(ti_adc, 1); MODULE_DEPEND(ti_adc, simplebus, 1, 1, 1); MODULE_DEPEND(ti_adc, ti_sysc, 1, 1, 1); #ifdef EVDEV_SUPPORT MODULE_DEPEND(ti_adc, evdev, 1, 1, 1); #endif diff --git a/sys/arm64/nvidia/tegra210/max77620.c b/sys/arm64/nvidia/tegra210/max77620.c index b33d73e71f90..a7d938cb8123 100644 --- a/sys/arm64/nvidia/tegra210/max77620.c +++ b/sys/arm64/nvidia/tegra210/max77620.c @@ -1,504 +1,509 @@ /*- * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * MAX77620 PMIC driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "regdev_if.h" #include "max77620.h" static struct ofw_compat_data compat_data[] = { {"maxim,max77620", 1}, {NULL, 0}, }; #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define LOCK_INIT(_sc) sx_init(&(_sc)->lock, "max77620") #define LOCK_DESTROY(_sc) sx_destroy(&(_sc)->lock); #define ASSERT_LOCKED(_sc) sx_assert(&(_sc)->lock, SA_XLOCKED); #define ASSERT_UNLOCKED(_sc) sx_assert(&(_sc)->lock, SA_UNLOCKED); #define MAX77620_DEVICE_ID 0x0C /* * Raw register access function. */ int max77620_read(struct max77620_softc *sc, uint8_t reg, uint8_t *val) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, 1, val}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int max77620_read_buf(struct max77620_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int max77620_write(struct max77620_softc *sc, uint8_t reg, uint8_t val) { uint8_t data[2]; int rv; struct iic_msg msgs[1] = { {0, IIC_M_WR, 2, data}, }; msgs[0].slave = sc->bus_addr; data[0] = reg; data[1] = val; rv = iicbus_transfer(sc->dev, msgs, 1); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int max77620_write_buf(struct max77620_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t data[1]; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, data}, {0, IIC_M_WR | IIC_M_NOSTART, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; data[0] = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int max77620_modify(struct max77620_softc *sc, uint8_t reg, uint8_t clear, uint8_t set) { uint8_t val; int rv; rv = max77620_read(sc, reg, &val); if (rv != 0) return (rv); val &= ~clear; val |= set; rv = max77620_write(sc, reg, val); if (rv != 0) return (rv); return (0); } static int max77620_parse_fps(struct max77620_softc *sc, int id, phandle_t node) { int val; if (OF_getencprop(node, "maxim,shutdown-fps-time-period-us", &val, sizeof(val)) >= 0) { val = min(val, MAX77620_FPS_PERIOD_MAX_US); val = max(val, MAX77620_FPS_PERIOD_MIN_US); sc->shutdown_fps[id] = val; } if (OF_getencprop(node, 
"maxim,suspend-fps-time-period-us", &val, sizeof(val)) >= 0) { val = min(val, MAX77620_FPS_PERIOD_MAX_US); val = max(val, MAX77620_FPS_PERIOD_MIN_US); sc->suspend_fps[id] = val; } if (OF_getencprop(node, "maxim,fps-event-source", &val, sizeof(val)) >= 0) { if (val > 2) { device_printf(sc->dev, "Invalid 'fps-event-source' " "value: %d\n", val); return (EINVAL); } sc->event_source[id] = val; } return (0); } static int max77620_parse_fdt(struct max77620_softc *sc, phandle_t node) { phandle_t fpsnode; char fps_name[6]; int i, rv; for (i = 0; i < MAX77620_FPS_COUNT; i++) { sc->shutdown_fps[i] = -1; sc->suspend_fps[i] = -1; sc->event_source[i] = -1; } fpsnode = ofw_bus_find_child(node, "fps"); if (fpsnode > 0) { for (i = 0; i < MAX77620_FPS_COUNT; i++) { sprintf(fps_name, "fps%d", i); node = ofw_bus_find_child(node, fps_name); if (node <= 0) continue; rv = max77620_parse_fps(sc, i, node); if (rv != 0) return (rv); } } return (0); } static int max77620_get_version(struct max77620_softc *sc) { uint8_t buf[6]; int i; int rv; /* Verify ID string (5 bytes ). */ for (i = 0; i <= 6; i++) { rv = RD1(sc, MAX77620_REG_CID0 + i , buf + i); if (rv != 0) { device_printf(sc->dev, "Cannot read chip ID: %d\n", rv); return (ENXIO); } } if (bootverbose) { device_printf(sc->dev, " ID: [0x%02X, 0x%02X, 0x%02X, 0x%02X]\n", buf[0], buf[1], buf[2], buf[3]); } device_printf(sc->dev, " MAX77620 version - OTP: 0x%02X, ES: 0x%02X\n", buf[4], buf[5]); return (0); } static uint8_t max77620_encode_fps_period(struct max77620_softc *sc, int val) { uint8_t i; int period; period = MAX77620_FPS_PERIOD_MIN_US; for (i = 0; i < 7; i++) { if (period >= val) return (i); period *= 2; } return (i); } static int max77620_init(struct max77620_softc *sc) { uint8_t mask, val, tmp; int i, rv; mask = 0; val = 0; for (i = 0; i < MAX77620_FPS_COUNT; i++) { if (sc->shutdown_fps[i] != -1) { mask |= MAX77620_FPS_TIME_PERIOD_MASK; tmp = max77620_encode_fps_period(sc, sc->shutdown_fps[i]); val |= (tmp << MAX77620_FPS_TIME_PERIOD_SHIFT) & MAX77620_FPS_TIME_PERIOD_MASK; } if (sc->event_source[i] != -1) { mask |= MAX77620_FPS_EN_SRC_MASK; tmp = sc->event_source[i]; val |= (tmp << MAX77620_FPS_EN_SRC_SHIFT) & MAX77620_FPS_EN_SRC_MASK; if (sc->event_source[i] == 2) { mask |= MAX77620_FPS_ENFPS_SW_MASK; val |= MAX77620_FPS_ENFPS_SW; } } rv = RM1(sc, MAX77620_REG_FPS_CFG0 + i, mask, val); if (rv != 0) { device_printf(sc->dev, "I/O error: %d\n", rv); return (ENXIO); } } /* Global mask interrupts */ rv = RM1(sc, MAX77620_REG_INTENLBT, 0x81, 0x81); rv = RM1(sc, MAX77620_REG_IRQTOPM, 0x81, 0x81); if (rv != 0) return (ENXIO); return (0); } #ifdef notyet static void max77620_intr(void *arg) { struct max77620_softc *sc; uint8_t intenlbt, intlbt, irqtop, irqtopm, irqsd, irqmasksd; uint8_t irq_lvl2_l0_7, irq_lvl2_l8, irq_lvl2_gpio, irq_msk_l0_7, irq_msk_l8; uint8_t onoffirq, onoffirqm; sc = (struct max77620_softc *)arg; /* XXX Finish temperature alarms. 
*/ RD1(sc, MAX77620_REG_INTENLBT, &intenlbt); RD1(sc, MAX77620_REG_INTLBT, &intlbt); RD1(sc, MAX77620_REG_IRQTOP, &irqtop); RD1(sc, MAX77620_REG_IRQTOPM, &irqtopm); RD1(sc, MAX77620_REG_IRQSD, &irqsd); RD1(sc, MAX77620_REG_IRQMASKSD, &irqmasksd); RD1(sc, MAX77620_REG_IRQ_LVL2_L0_7, &irq_lvl2_l0_7); RD1(sc, MAX77620_REG_IRQ_MSK_L0_7, &irq_msk_l0_7); RD1(sc, MAX77620_REG_IRQ_LVL2_L8, &irq_lvl2_l8); RD1(sc, MAX77620_REG_IRQ_MSK_L8, &irq_msk_l8); RD1(sc, MAX77620_REG_IRQ_LVL2_GPIO, &irq_lvl2_gpio); RD1(sc, MAX77620_REG_ONOFFIRQ, &onoffirq); RD1(sc, MAX77620_REG_ONOFFIRQM, &onoffirqm); printf("%s: intlbt: 0x%02X, intenlbt: 0x%02X\n", __func__, intlbt, intenlbt); printf("%s: irqtop: 0x%02X, irqtopm: 0x%02X\n", __func__, irqtop, irqtopm); printf("%s: irqsd: 0x%02X, irqmasksd: 0x%02X\n", __func__, irqsd, irqmasksd); printf("%s: onoffirq: 0x%02X, onoffirqm: 0x%02X\n", __func__, onoffirq, onoffirqm); printf("%s: irq_lvl2_l0_7: 0x%02X, irq_msk_l0_7: 0x%02X\n", __func__, irq_lvl2_l0_7, irq_msk_l0_7); printf("%s: irq_lvl2_l8: 0x%02X, irq_msk_l8: 0x%02X\n", __func__, irq_lvl2_l8, irq_msk_l8); printf("%s: irq_lvl2_gpio: 0x%02X\n", __func__, irq_lvl2_gpio); } #endif static int max77620_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "MAX77620 PMIC"); return (BUS_PROBE_DEFAULT); } static int max77620_attach(device_t dev) { struct max77620_softc *sc; int rv, rid; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; sc->bus_addr = iicbus_get_addr(dev); node = ofw_bus_get_node(sc->dev); rv = 0; LOCK_INIT(sc); rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); #ifdef notyet /* Interrupt parent is not implemented */ if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } #endif rv = max77620_parse_fdt(sc, node); if (rv != 0) goto fail; rv = max77620_get_version(sc); if (rv != 0) goto fail; rv = max77620_init(sc); if (rv != 0) goto fail; rv = max77620_regulator_attach(sc, node); if (rv != 0) goto fail; rv = max77620_gpio_attach(sc, node); if (rv != 0) goto fail; rv = max77620_rtc_create(sc, node); if (rv != 0) goto fail; fdt_pinctrl_register(dev, NULL); fdt_pinctrl_configure_by_name(dev, "default"); /* Setup interrupt. */ #ifdef notyet rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, max77620_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } #endif return (bus_generic_attach(dev)); fail: if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); LOCK_DESTROY(sc); return (rv); } static int max77620_detach(device_t dev) { struct max77620_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static phandle_t max77620_gpio_get_node(device_t bus, device_t dev) { /* We only have one child, the GPIO bus, which needs our own node. 
*/ return (ofw_bus_get_node(bus)); } static device_method_t max77620_methods[] = { /* Device interface */ DEVMETHOD(device_probe, max77620_probe), DEVMETHOD(device_attach, max77620_attach), DEVMETHOD(device_detach, max77620_detach), /* Regdev interface */ DEVMETHOD(regdev_map, max77620_regulator_map), /* GPIO protocol interface */ DEVMETHOD(gpio_get_bus, max77620_gpio_get_bus), DEVMETHOD(gpio_pin_max, max77620_gpio_pin_max), DEVMETHOD(gpio_pin_getname, max77620_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, max77620_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, max77620_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, max77620_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, max77620_gpio_pin_get), DEVMETHOD(gpio_pin_set, max77620_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, max77620_gpio_pin_toggle), DEVMETHOD(gpio_map_gpios, max77620_gpio_map_gpios), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure, max77620_pinmux_configure), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, max77620_gpio_get_node), DEVMETHOD_END }; static DEFINE_CLASS_0(gpio, max77620_driver, max77620_methods, sizeof(struct max77620_softc)); EARLY_DRIVER_MODULE(max77620, iicbus, max77620_driver, NULL, NULL, 74); diff --git a/sys/arm64/nvidia/tegra210/max77620_rtc.c b/sys/arm64/nvidia/tegra210/max77620_rtc.c index 29808af27819..dca90549443c 100644 --- a/sys/arm64/nvidia/tegra210/max77620_rtc.c +++ b/sys/arm64/nvidia/tegra210/max77620_rtc.c @@ -1,419 +1,424 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2020 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "ofw_iicbus_if.h" #include "max77620.h" #define MAX77620_RTC_INT 0x00 #define MAX77620_RTC_INTM 0x01 #define MAX77620_RTC_CONTROLM 0x02 #define MAX77620_RTC_CONTROL 0x03 #define RTC_CONTROL_MODE_24 (1 << 1) #define RTC_CONTROL_BCD_EN (1 << 0) #define MAX77620_RTC_UPDATE0 0x04 #define RTC_UPDATE0_RTC_RBUDR (1 << 4) #define RTC_UPDATE0_RTC_UDR (1 << 0) #define MAX77620_WTSR_SMPL_CNTL 0x06 #define MAX77620_RTC_SEC 0x07 #define MAX77620_RTC_MIN 0x08 #define MAX77620_RTC_HOUR 0x09 #define MAX77620_RTC_WEEKDAY 0x0A #define MAX77620_RTC_MONTH 0x0B #define MAX77620_RTC_YEAR 0x0C #define MAX77620_RTC_DATE 0x0D #define MAX77620_ALARM1_SEC 0x0E #define MAX77620_ALARM1_MIN 0x0F #define MAX77620_ALARM1_HOUR 0x10 #define MAX77620_ALARM1_WEEKDAY 0x11 #define MAX77620_ALARM1_MONTH 0x12 #define MAX77620_ALARM1_YEAR 0x13 #define MAX77620_ALARM1_DATE 0x14 #define MAX77620_ALARM2_SEC 0x15 #define MAX77620_ALARM2_MIN 0x16 #define MAX77620_ALARM2_HOUR 0x17 #define MAX77620_ALARM2_WEEKDAY 0x18 #define MAX77620_ALARM2_MONTH 0x19 #define MAX77620_ALARM2_YEAR 0x1A #define MAX77620_ALARM2_DATE 0x1B #define MAX77620_RTC_START_YEAR 2000 #define MAX77620_RTC_I2C_ADDR 0x68 #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define LOCK_INIT(_sc) sx_init(&(_sc)->lock, "max77620_rtc") #define LOCK_DESTROY(_sc) sx_destroy(&(_sc)->lock); struct max77620_rtc_softc { device_t dev; struct sx lock; int bus_addr; }; char max77620_rtc_compat[] = "maxim,max77620_rtc"; /* * Raw register access function. */ static int max77620_rtc_read(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *val) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, 1, val}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_read_buf(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_write(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t val) { uint8_t data[2]; int rv; struct iic_msg msgs[1] = { {0, IIC_M_WR, 2, data}, }; msgs[0].slave = sc->bus_addr; data[0] = reg; data[1] = val; rv = iicbus_transfer(sc->dev, msgs, 1); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_write_buf(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t data[1]; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, data}, {0, IIC_M_WR | IIC_M_NOSTART, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; data[0] = reg; rv = iicbus_transfer(sc->dev, msgs, 2); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } static int max77620_rtc_modify(struct max77620_rtc_softc *sc, uint8_t reg, uint8_t clear, uint8_t set) { uint8_t val; int rv; rv = max77620_rtc_read(sc, reg, &val); if (rv 
!= 0) return (rv); val &= ~clear; val |= set; rv = max77620_rtc_write(sc, reg, val); if (rv != 0) return (rv); return (0); } static int max77620_rtc_update(struct max77620_rtc_softc *sc, bool for_read) { uint8_t reg; int rv; reg = for_read ? RTC_UPDATE0_RTC_RBUDR : RTC_UPDATE0_RTC_UDR; rv = max77620_rtc_modify(sc, MAX77620_RTC_UPDATE0, reg, reg); if (rv != 0) return (rv); DELAY(16000); return (rv); } static int max77620_rtc_gettime(device_t dev, struct timespec *ts) { struct max77620_rtc_softc *sc; struct clocktime ct; uint8_t buf[7]; int rv; sc = device_get_softc(dev); LOCK(sc); rv = max77620_rtc_update(sc, true); if (rv != 0) { UNLOCK(sc); device_printf(sc->dev, "Failed to strobe RTC data\n"); return (rv); } rv = max77620_rtc_read_buf(sc, MAX77620_RTC_SEC, buf, nitems(buf)); UNLOCK(sc); if (rv != 0) { device_printf(sc->dev, "Failed to read RTC data\n"); return (rv); } ct.nsec = 0; ct.sec = bcd2bin(buf[0] & 0x7F); ct.min = bcd2bin(buf[1] & 0x7F); ct.hour = bcd2bin(buf[2] & 0x3F); ct.dow = ffs(buf[3] & 0x7F) - 1; ct.mon = bcd2bin(buf[4] & 0x1F); ct.year = bcd2bin(buf[5] & 0x7F) + MAX77620_RTC_START_YEAR; ct.day = bcd2bin(buf[6] & 0x3F); return (clock_ct_to_ts(&ct, ts)); } static int max77620_rtc_settime(device_t dev, struct timespec *ts) { struct max77620_rtc_softc *sc; struct clocktime ct; uint8_t buf[7]; int rv; sc = device_get_softc(dev); clock_ts_to_ct(ts, &ct); if (ct.year < MAX77620_RTC_START_YEAR) return (EINVAL); buf[0] = bin2bcd(ct.sec); buf[1] = bin2bcd(ct.min); buf[2] = bin2bcd(ct.hour); buf[3] = 1 << ct.dow; buf[4] = bin2bcd(ct.mon); buf[5] = bin2bcd(ct.year - MAX77620_RTC_START_YEAR); buf[6] = bin2bcd(ct.day); LOCK(sc); rv = max77620_rtc_write_buf(sc, MAX77620_RTC_SEC, buf, nitems(buf)); if (rv != 0) { UNLOCK(sc); device_printf(sc->dev, "Failed to write RTC data\n"); return (rv); } rv = max77620_rtc_update(sc, false); UNLOCK(sc); if (rv != 0) { device_printf(sc->dev, "Failed to update RTC data\n"); return (rv); } return (0); } static int max77620_rtc_probe(device_t dev) { const char *compat; /* * TODO: * ofw_bus_is_compatible() should use compat string from devinfo cache * maximum size of OFW property should be defined in public header */ if ((compat = ofw_bus_get_compat(dev)) == NULL) return (ENXIO); if (strncasecmp(compat, max77620_rtc_compat, 255) != 0) return (ENXIO); device_set_desc(dev, "MAX77620 RTC"); return (BUS_PROBE_DEFAULT); } static int max77620_rtc_attach(device_t dev) { struct max77620_rtc_softc *sc; uint8_t reg; int rv; sc = device_get_softc(dev); sc->dev = dev; sc->bus_addr = iicbus_get_addr(dev); LOCK_INIT(sc); reg = RTC_CONTROL_MODE_24 | RTC_CONTROL_BCD_EN; rv = max77620_rtc_modify(sc, MAX77620_RTC_CONTROLM, reg, reg); if (rv != 0) { device_printf(sc->dev, "Failed to configure RTC\n"); goto fail; } rv = max77620_rtc_modify(sc, MAX77620_RTC_CONTROL, reg, reg); if (rv != 0) { device_printf(sc->dev, "Failed to configure RTC\n"); goto fail; } rv = max77620_rtc_update(sc, false); if (rv != 0) { device_printf(sc->dev, "Failed to update RTC data\n"); goto fail; } clock_register(sc->dev, 1000000); return (bus_generic_attach(dev)); fail: LOCK_DESTROY(sc); return (rv); } static int max77620_rtc_detach(device_t dev) { struct max77620_rtc_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } /* * The secondary address of the MAX77620 (RTC function) is not in the DT; * add it manually as a subdevice. */ int max77620_rtc_create(struct max77620_softc *sc,
phandle_t node) { device_t parent, child; int rv; parent = device_get_parent(sc->dev); child = BUS_ADD_CHILD(parent, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "Cannot create MAX77620 RTC device.\n"); return (ENXIO); } rv = OFW_IICBUS_SET_DEVINFO(parent, child, -1, "rtc@68", max77620_rtc_compat, MAX77620_RTC_I2C_ADDR << 1); if (rv != 0) { device_printf(sc->dev, "Cannot setup MAX77620 RTC device.\n"); return (ENXIO); } return (0); } static device_method_t max77620_rtc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, max77620_rtc_probe), DEVMETHOD(device_attach, max77620_rtc_attach), DEVMETHOD(device_detach, max77620_rtc_detach), /* RTC interface */ DEVMETHOD(clock_gettime, max77620_rtc_gettime), DEVMETHOD(clock_settime, max77620_rtc_settime), DEVMETHOD_END }; static DEFINE_CLASS_0(rtc, max77620_rtc_driver, max77620_rtc_methods, sizeof(struct max77620_rtc_softc)); EARLY_DRIVER_MODULE(max77620rtc_, iicbus, max77620_rtc_driver, NULL, NULL, 74); diff --git a/sys/dev/dwwdt/dwwdt.c b/sys/dev/dwwdt/dwwdt.c index 89f94fff9bad..c433317a8d87 100644 --- a/sys/dev/dwwdt/dwwdt.c +++ b/sys/dev/dwwdt/dwwdt.c @@ -1,373 +1,378 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 BusyTech * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Registers */ #define DWWDT_CR 0x00 #define DWWDT_CR_WDT_EN (1 << 0) #define DWWDT_CR_RESP_MODE (1 << 1) #define DWWDT_TORR 0x04 #define DWWDT_CCVR 0x08 #define DWWDT_CRR 0x0C #define DWWDT_CRR_KICK 0x76 #define DWWDT_STAT 0x10 #define DWWDT_STAT_STATUS 0x01 #define DWWDT_EOI 0x14 #define DWWDT_READ4(sc, reg) bus_read_4((sc)->sc_mem_res, (reg)) #define DWWDT_WRITE4(sc, reg, val) \ bus_write_4((sc)->sc_mem_res, (reg), (val)) /* * 47 = 16 (timeout shift of dwwdt) + 30 (1 s ~= 2 ** 30 ns) + 1 * (pre-restart delay). Worked example for dwwdt_event() below: with a * 25 MHz clock (flsl(freq) == 25), a 1 s request (timeout code 30) yields * val = 30 + 25 - 47 + 1 = 9, i.e. a period of 2 ** (16 + 9) cycles, * roughly 1.3 s. */ #define DWWDT_EXP_OFFSET 47 struct dwwdt_softc { device_t sc_dev; struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_intr_cookie; clk_t sc_clk; uint64_t sc_clk_freq; eventhandler_tag sc_evtag; int sc_mem_rid; int sc_irq_rid; enum { DWWDT_STOPPED, DWWDT_RUNNING, } sc_status; }; static struct ofw_compat_data compat_data[] = { { "snps,dw-wdt", 1 }, { NULL, 0 } }; SYSCTL_NODE(_dev, OID_AUTO, dwwdt, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Synopsys Designware watchdog timer"); /* Setting this to true disables full restart mode. */ static bool dwwdt_prevent_restart = false; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, prevent_restart, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_prevent_restart, 0, "Disable system reset on timeout"); static bool dwwdt_debug_enabled = false; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_debug_enabled, 0, "Enable debug mode"); static bool dwwdt_panic_first = true; SYSCTL_BOOL(_dev_dwwdt, OID_AUTO, panic_first, CTLFLAG_RW | CTLFLAG_MPSAFE, &dwwdt_panic_first, 0, "Try to panic on timeout, reset on another timeout"); static int dwwdt_probe(device_t); static int dwwdt_attach(device_t); static int dwwdt_detach(device_t); static int dwwdt_shutdown(device_t); static void dwwdt_intr(void *); static void dwwdt_event(void *, unsigned int, int *); /* Helpers */ static inline void dwwdt_start(struct dwwdt_softc *sc); static inline bool dwwdt_started(const struct dwwdt_softc *sc); static inline void dwwdt_stop(struct dwwdt_softc *sc); static inline void dwwdt_set_timeout(const struct dwwdt_softc *sc, int val); static void dwwdt_debug(device_t); static void dwwdt_debug(device_t dev) { /* * Reading from EOI may clear the interrupt flag. */ const struct dwwdt_softc *sc = device_get_softc(dev); device_printf(dev, "Register dump:\n"); device_printf(dev, " CR: %08x\n", DWWDT_READ4(sc, DWWDT_CR)); device_printf(dev, " CCVR: %08x\n", DWWDT_READ4(sc, DWWDT_CCVR)); device_printf(dev, " CRR: %08x\n", DWWDT_READ4(sc, DWWDT_CRR)); device_printf(dev, " STAT: %08x\n", DWWDT_READ4(sc, DWWDT_STAT)); device_printf(dev, "Clock: %s\n", clk_get_name(sc->sc_clk)); device_printf(dev, " FREQ: %lu\n", sc->sc_clk_freq); } static inline bool dwwdt_started(const struct dwwdt_softc *sc) { /* The CR WDT_EN bit can be cleared only by a full CPU reset.
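* The hardware register is therefore the authoritative state across detach/attach cycles; dwwdt_detach() below relies on this to refuse unloading while the timer is armed.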
*/ return ((DWWDT_READ4(sc, DWWDT_CR) & DWWDT_CR_WDT_EN) != 0); } static inline void dwwdt_start(struct dwwdt_softc *sc) { uint32_t val; /* Enable watchdog */ val = DWWDT_READ4(sc, DWWDT_CR); val |= DWWDT_CR_WDT_EN | DWWDT_CR_RESP_MODE; DWWDT_WRITE4(sc, DWWDT_CR, val); sc->sc_status = DWWDT_RUNNING; } static inline void dwwdt_stop(struct dwwdt_softc *sc) { sc->sc_status = DWWDT_STOPPED; dwwdt_set_timeout(sc, 0x0f); } static inline void dwwdt_set_timeout(const struct dwwdt_softc *sc, int val) { DWWDT_WRITE4(sc, DWWDT_TORR, val); DWWDT_WRITE4(sc, DWWDT_CRR, DWWDT_CRR_KICK); } static void dwwdt_intr(void *arg) { struct dwwdt_softc *sc = arg; KASSERT((DWWDT_READ4(sc, DWWDT_STAT) & DWWDT_STAT_STATUS) != 0, ("Missing interrupt status bit?")); if (dwwdt_prevent_restart || sc->sc_status == DWWDT_STOPPED) { /* * Confirm interrupt reception. Restart counter. * This also emulates stopping the watchdog. */ (void)DWWDT_READ4(sc, DWWDT_EOI); return; } if (dwwdt_panic_first) panic("dwwdt pre-timeout interrupt"); } static void dwwdt_event(void *arg, unsigned int cmd, int *error) { struct dwwdt_softc *sc = arg; const int exponent = flsl(sc->sc_clk_freq); int timeout; int val; timeout = cmd & WD_INTERVAL; val = MAX(0, timeout + exponent - DWWDT_EXP_OFFSET + 1); dwwdt_stop(sc); if (cmd == 0 || val > 0x0f) { /* * Set the maximum time between interrupts and leave the * watchdog disabled. */ return; } dwwdt_set_timeout(sc, val); dwwdt_start(sc); *error = 0; if (dwwdt_debug_enabled) dwwdt_debug(sc->sc_dev); } static int dwwdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Synopsys Designware watchdog timer"); return (BUS_PROBE_DEFAULT); } static int dwwdt_attach(device_t dev) { struct dwwdt_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "cannot allocate memory resource\n"); goto err_no_mem; } sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE); if (sc->sc_irq_res == NULL) { device_printf(dev, "cannot allocate IRQ resource\n"); goto err_no_irq; } sc->sc_intr_cookie = NULL; if (bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, dwwdt_intr, sc, &sc->sc_intr_cookie) != 0) { device_printf(dev, "cannot setup interrupt routine\n"); goto err_no_intr; } if (clk_get_by_ofw_index(dev, 0, 0, &sc->sc_clk) != 0) { device_printf(dev, "cannot find clock\n"); goto err_no_clock; } if (clk_enable(sc->sc_clk) != 0) { device_printf(dev, "cannot enable clock\n"); goto err_no_freq; } if (clk_get_freq(sc->sc_clk, &sc->sc_clk_freq) != 0) { device_printf(dev, "cannot get clock frequency\n"); goto err_no_freq; } if (sc->sc_clk_freq == 0UL) goto err_no_freq; sc->sc_evtag = EVENTHANDLER_REGISTER(watchdog_list, dwwdt_event, sc, 0); sc->sc_status = DWWDT_STOPPED; return (bus_generic_attach(dev)); err_no_freq: clk_release(sc->sc_clk); err_no_clock: bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_cookie); err_no_intr: bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); err_no_irq: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); err_no_mem: return (ENXIO); } static int dwwdt_detach(device_t dev) { struct dwwdt_softc *sc = device_get_softc(dev); + int error; if (dwwdt_started(sc)) { /* * Once started it cannot be stopped.
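* Unloading the driver would leave an armed timer behind with nobody to kick it, ending in a surprise system reset.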
Prevent module unload * instead. */ return (EBUSY); } + error = bus_generic_detach(dev); + if (error != 0) + return (error); + EVENTHANDLER_DEREGISTER(watchdog_list, sc->sc_evtag); sc->sc_evtag = NULL; if (sc->sc_clk != NULL) clk_release(sc->sc_clk); if (sc->sc_intr_cookie != NULL) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_cookie); if (sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); } if (sc->sc_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); } - return (bus_generic_detach(dev)); + return (0); } static int dwwdt_shutdown(device_t dev) { struct dwwdt_softc *sc; sc = device_get_softc(dev); /* Prevent restarts during shutdown. */ dwwdt_prevent_restart = true; dwwdt_stop(sc); return (bus_generic_shutdown(dev)); } static device_method_t dwwdt_methods[] = { DEVMETHOD(device_probe, dwwdt_probe), DEVMETHOD(device_attach, dwwdt_attach), DEVMETHOD(device_detach, dwwdt_detach), DEVMETHOD(device_shutdown, dwwdt_shutdown), {0, 0} }; static driver_t dwwdt_driver = { "dwwdt", dwwdt_methods, sizeof(struct dwwdt_softc), }; DRIVER_MODULE(dwwdt, simplebus, dwwdt_driver, NULL, NULL); MODULE_VERSION(dwwdt, 1); OFWBUS_PNP_INFO(compat_data); diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c index e9d4530e9085..f0b6cec1bb61 100644 --- a/sys/dev/ena/ena.c +++ b/sys/dev/ena/ena.c @@ -1,4256 +1,4260 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ena.h" #include "ena_datapath.h" #include "ena_rss.h" #include "ena_sysctl.h" #ifdef DEV_NETMAP #include "ena_netmap.h" #endif /* DEV_NETMAP */ /********************************************************* * Function prototypes *********************************************************/ static int ena_probe(device_t); static void ena_intr_msix_mgmnt(void *); static void ena_free_pci_resources(struct ena_adapter *); static int ena_change_mtu(if_t, int); static inline void ena_alloc_counters(counter_u64_t *, int); static inline void ena_free_counters(counter_u64_t *, int); static inline void ena_reset_counters(counter_u64_t *, int); static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *, uint16_t); static void ena_init_io_rings_basic(struct ena_adapter *); static void ena_init_io_rings_advanced(struct ena_adapter *); static void ena_init_io_rings(struct ena_adapter *); static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); static void ena_free_all_io_rings_resources(struct ena_adapter *); static int ena_setup_tx_dma_tag(struct ena_adapter *); static int ena_free_tx_dma_tag(struct ena_adapter *); static int ena_setup_rx_dma_tag(struct ena_adapter *); static int ena_free_rx_dma_tag(struct ena_adapter *); static void ena_release_all_tx_dmamap(struct ena_ring *); static int ena_setup_tx_resources(struct ena_adapter *, int); static void ena_free_tx_resources(struct ena_adapter *, int); static int ena_setup_all_tx_resources(struct ena_adapter *); static void ena_free_all_tx_resources(struct ena_adapter *); static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); static void ena_free_rx_resources(struct ena_adapter *, unsigned int); static int ena_setup_all_rx_resources(struct ena_adapter *); static void ena_free_all_rx_resources(struct ena_adapter *); static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); static void ena_refill_all_rx_bufs(struct ena_adapter *); static void ena_free_all_rx_bufs(struct ena_adapter *); static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); static void ena_free_all_tx_bufs(struct ena_adapter *); static void ena_destroy_all_tx_queues(struct ena_adapter *); static void ena_destroy_all_rx_queues(struct ena_adapter *); static void ena_destroy_all_io_queues(struct ena_adapter *); static int ena_create_io_queues(struct ena_adapter *); static int ena_handle_msix(void *); static int ena_enable_msix(struct ena_adapter *); static void ena_setup_mgmnt_intr(struct ena_adapter *); static int ena_setup_io_intr(struct ena_adapter *); static int ena_request_mgmnt_irq(struct ena_adapter *); static int ena_request_io_irq(struct ena_adapter *); static void ena_free_mgmnt_irq(struct ena_adapter *); static void ena_free_io_irq(struct ena_adapter *); static void ena_free_irqs(struct ena_adapter *); static void ena_disable_msix(struct ena_adapter *); static void ena_unmask_all_io_irqs(struct ena_adapter *); static int 
ena_up_complete(struct ena_adapter *); static uint64_t ena_get_counter(if_t, ift_counter); static int ena_media_change(if_t); static void ena_media_status(if_t, struct ifmediareq *); static void ena_init(void *); static int ena_ioctl(if_t, u_long, caddr_t); static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); static void ena_update_host_info(struct ena_admin_host_info *, if_t); static void ena_update_hwassist(struct ena_adapter *); static void ena_setup_ifnet(device_t, struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_enable_wc(device_t, struct resource *); static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *); static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *, struct ena_com_dev_get_features_ctx *); static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *, struct ena_adapter *); static void ena_config_host_info(struct ena_com_dev *, device_t); static int ena_attach(device_t); static int ena_detach(device_t); static int ena_device_init(struct ena_adapter *, device_t, struct ena_com_dev_get_features_ctx *, int *); static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *); static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *); static int ena_copy_eni_metrics(struct ena_adapter *); static int ena_copy_srd_metrics(struct ena_adapter *); static int ena_copy_customer_metrics(struct ena_adapter *); static void ena_timer_service(void *); static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *, struct ena_ring *); #ifdef DEV_NETMAP static int ena_reinit_netmap(struct ena_adapter *adapter); #endif static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME " v" ENA_DRV_MODULE_VERSION; static ena_vendor_info_t ena_vendor_info_array[] = { { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 }, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 }, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 }, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 }, /* Last entry */ { 0, 0, 0 } }; struct sx ena_global_lock; /* * Contains pointers to event handlers, e.g. link state change.
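* Events without a dedicated handler are dispatched to unimplemented_aenq_handler().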
*/ static struct ena_aenq_handlers aenq_handlers; void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma, int mapflags, bus_size_t alignment, int domain) { struct ena_adapter *adapter = device_get_softc(dmadev); device_t pdev = adapter->pdev; uint32_t maxsize; uint64_t dma_space_addr; int error; maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); if (unlikely(dma_space_addr == 0)) dma_space_addr = BUS_SPACE_MAXADDR; error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ alignment, 0, /* alignment, bounds */ dma_space_addr, /* lowaddr of exclusion window */ BUS_SPACE_MAXADDR, /* highaddr of exclusion window */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->tag); if (unlikely(error != 0)) { ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error); goto fail_tag; } error = bus_dma_tag_set_domain(dma->tag, domain); if (unlikely(error != 0)) { ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n", error); goto fail_map_create; } error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); if (unlikely(error != 0)) { ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n", (uintmax_t)size, error); goto fail_map_create; } dma->paddr = 0; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, ena_dmamap_callback, &dma->paddr, mapflags); if (unlikely((error != 0) || (dma->paddr == 0))) { ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error); goto fail_map_load; } bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); fail_map_load: bus_dmamem_free(dma->tag, dma->vaddr, dma->map); fail_map_create: bus_dma_tag_destroy(dma->tag); fail_tag: dma->tag = NULL; dma->vaddr = NULL; dma->paddr = 0; return (error); } static void ena_free_pci_resources(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; if (adapter->memory != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_MEM_BAR), adapter->memory); } if (adapter->registers != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_REG_BAR), adapter->registers); } if (adapter->msix != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid, adapter->msix); } } static int ena_probe(device_t dev) { ena_vendor_info_t *ent; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); ent = ena_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id)) { ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id, pci_device_id); device_set_desc(dev, ENA_DEVICE_DESC); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } static int ena_change_mtu(if_t ifp, int new_mtu) { struct ena_adapter *adapter = if_getsoftc(ifp); device_t pdev = adapter->pdev; int rc; if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { ena_log(pdev, ERR, "Invalid MTU setting. 
new_mtu: %d max mtu: %d min mtu: %d\n", new_mtu, adapter->max_mtu, ENA_MIN_MTU); return (EINVAL); } rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); if (likely(rc == 0)) { ena_log(pdev, DBG, "set MTU to %d\n", new_mtu); if_setmtu(ifp, new_mtu); } else { ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu); } return (rc); } static inline void ena_alloc_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) *begin = counter_u64_alloc(M_WAITOK); } static inline void ena_free_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_free(*begin); } static inline void ena_reset_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_zero(*begin); } static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, uint16_t qid) { ring->qid = qid; ring->adapter = adapter; ring->ena_dev = adapter->ena_dev; atomic_store_8(&ring->first_interrupt, 0); ring->no_interrupt_event_cnt = 0; } static void ena_init_io_rings_basic(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev; struct ena_ring *txr, *rxr; struct ena_que *que; int i; ena_dev = adapter->ena_dev; for (i = 0; i < adapter->num_io_queues; i++) { txr = &adapter->tx_ring[i]; rxr = &adapter->rx_ring[i]; /* TX/RX common ring state */ ena_init_io_rings_common(adapter, txr, i); ena_init_io_rings_common(adapter, rxr, i); /* TX specific ring state */ txr->tx_max_header_size = ena_dev->tx_max_header_size; txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; que = &adapter->que[i]; que->adapter = adapter; que->id = i; que->tx_ring = txr; que->rx_ring = rxr; txr->que = que; rxr->que = que; rxr->empty_rx_queue = 0; rxr->rx_mbuf_sz = ena_mbuf_sz; } } static void ena_init_io_rings_advanced(struct ena_adapter *adapter) { struct ena_ring *txr, *rxr; int i; for (i = 0; i < adapter->num_io_queues; i++) { txr = &adapter->tx_ring[i]; rxr = &adapter->rx_ring[i]; /* Allocate a buf ring */ txr->buf_ring_size = adapter->buf_ring_size; txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK, &txr->ring_mtx); /* Allocate Tx statistics. */ ena_alloc_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); txr->tx_last_cleanup_ticks = ticks; /* Allocate Rx statistics. */ ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); /* Initialize locks */ snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(adapter->pdev), i); snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(adapter->pdev), i); mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF); } } static void ena_init_io_rings(struct ena_adapter *adapter) { /* * IO rings initialization can be divided into the 2 steps: * 1. Initialize variables and fields with initial values and copy * them from adapter/ena_dev (basic) * 2. 
Allocate mutex, counters and buf_ring (advanced) */ ena_init_io_rings_basic(adapter); ena_init_io_rings_advanced(adapter); } static void ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *txr = &adapter->tx_ring[qid]; struct ena_ring *rxr = &adapter->rx_ring[qid]; ena_free_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); ena_free_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); ENA_RING_MTX_LOCK(txr); drbr_free(txr->br, M_DEVBUF); ENA_RING_MTX_UNLOCK(txr); mtx_destroy(&txr->ring_mtx); } static void ena_free_all_io_rings_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_io_ring_resources(adapter, i); } static int ena_setup_tx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Tx buffers */ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ ENA_TSO_MAXSIZE, /* maxsize */ adapter->max_tx_sgl_size - 1, /* nsegments */ ENA_TSO_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &adapter->tx_buf_tag); return (ret); } static int ena_free_tx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->tx_buf_tag); if (likely(ret == 0)) adapter->tx_buf_tag = NULL; return (ret); } static int ena_setup_rx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Rx buffers*/ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ ena_mbuf_sz, /* maxsize */ adapter->max_rx_sgl_size, /* nsegments */ ena_mbuf_sz, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->rx_buf_tag); return (ret); } static int ena_free_rx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->rx_buf_tag); if (likely(ret == 0)) adapter->rx_buf_tag = NULL; return (ret); } int validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc) { struct ena_adapter *adapter = tx_ring->adapter; enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; if (unlikely(tx_req_id_rc != 0)) { if (tx_req_id_rc == ENA_COM_FAULT) { reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED; ena_log(adapter->pdev, ERR, "TX descriptor malformed. 
req_id %hu qid %hu\n", req_id, tx_ring->qid); } else if (tx_req_id_rc == ENA_COM_INVAL) { ena_log_nm(adapter->pdev, WARN, "Invalid req_id %hu in qid %hu\n", req_id, tx_ring->qid); counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); } ena_trigger_reset(adapter, reset_reason); return (EFAULT); } return (0); } static void ena_release_all_tx_dmamap(struct ena_ring *tx_ring) { struct ena_adapter *adapter = tx_ring->adapter; struct ena_tx_buffer *tx_info; bus_dma_tag_t tx_tag = adapter->tx_buf_tag; int i; #ifdef DEV_NETMAP struct ena_netmap_tx_info *nm_info; int j; #endif /* DEV_NETMAP */ for (i = 0; i < tx_ring->ring_size; ++i) { tx_info = &tx_ring->tx_buffer_info[i]; #ifdef DEV_NETMAP if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) { nm_info = &tx_info->nm_info; for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) { if (nm_info->map_seg[j] != NULL) { bus_dmamap_destroy(tx_tag, nm_info->map_seg[j]); nm_info->map_seg[j] = NULL; } } } #endif /* DEV_NETMAP */ if (tx_info->dmamap != NULL) { bus_dmamap_destroy(tx_tag, tx_info->dmamap); tx_info->dmamap = NULL; } } } /** * ena_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) { device_t pdev = adapter->pdev; char thread_name[MAXCOMLEN + 1]; struct ena_que *que = &adapter->que[qid]; struct ena_ring *tx_ring = que->tx_ring; cpuset_t *cpu_mask = NULL; int size, i, err; #ifdef DEV_NETMAP bus_dmamap_t *map; int j; ena_netmap_reset_tx_ring(adapter, qid); #endif /* DEV_NETMAP */ size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->tx_buffer_info == NULL)) return (ENOMEM); size = sizeof(uint16_t) * tx_ring->ring_size; tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->free_tx_ids == NULL)) goto err_buf_info_free; size = tx_ring->tx_max_header_size; tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) goto err_tx_ids_free; /* Req id stack for TX OOO completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; /* Reset TX statistics. */ ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, sizeof(tx_ring->tx_stats)); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->acum_pkts = 0; /* Make sure that drbr is empty */ ENA_RING_MTX_LOCK(tx_ring); drbr_flush(adapter->ifp, tx_ring->br); ENA_RING_MTX_UNLOCK(tx_ring); /* ... 
and create the buffer DMA maps */ for (i = 0; i < tx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->tx_buf_tag, 0, &tx_ring->tx_buffer_info[i].dmamap); if (unlikely(err != 0)) { ena_log(pdev, ERR, "Unable to create Tx DMA map for buffer %d\n", i); goto err_map_release; } #ifdef DEV_NETMAP if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) { map = tx_ring->tx_buffer_info[i].nm_info.map_seg; for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { err = bus_dmamap_create(adapter->tx_buf_tag, 0, &map[j]); if (unlikely(err != 0)) { ena_log(pdev, ERR, "Unable to create Tx DMA for buffer %d %d\n", i, j); goto err_map_release; } } } #endif /* DEV_NETMAP */ } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); if (unlikely(tx_ring->enqueue_tq == NULL)) { ena_log(pdev, ERR, "Unable to create taskqueue for enqueue task\n"); i = tx_ring->ring_size; goto err_map_release; } tx_ring->running = true; #ifdef RSS cpu_mask = &que->cpu_mask; snprintf(thread_name, sizeof(thread_name), "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu); #else snprintf(thread_name, sizeof(thread_name), "%s txeq %d", device_get_nameunit(adapter->pdev), que->id); #endif taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET, cpu_mask, "%s", thread_name); return (0); err_map_release: ena_release_all_tx_dmamap(tx_ring); err_tx_ids_free: free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; err_buf_info_free: free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources **/ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_ring *tx_ring = &adapter->tx_ring[qid]; #ifdef DEV_NETMAP struct ena_netmap_tx_info *nm_info; int j; #endif /* DEV_NETMAP */ while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); taskqueue_free(tx_ring->enqueue_tq); ENA_RING_MTX_LOCK(tx_ring); /* Flush buffer ring, */ drbr_flush(adapter->ifp, tx_ring->br); /* Free buffer DMA maps, */ for (int i = 0; i < tx_ring->ring_size; i++) { bus_dmamap_sync(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].dmamap); bus_dmamap_destroy(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].dmamap); #ifdef DEV_NETMAP if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) { nm_info = &tx_ring->tx_buffer_info[i].nm_info; for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { if (nm_info->socket_buf_idx[j] != 0) { bus_dmamap_sync(adapter->tx_buf_tag, nm_info->map_seg[j], BUS_DMASYNC_POSTWRITE); ena_netmap_unload(adapter, nm_info->map_seg[j]); } bus_dmamap_destroy(adapter->tx_buf_tag, nm_info->map_seg[j]); nm_info->socket_buf_idx[j] = 0; } } #endif /* DEV_NETMAP */ m_freem(tx_ring->tx_buffer_info[i].mbuf); tx_ring->tx_buffer_info[i].mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); /* And free allocated memory. 
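* (the per-buffer DMA maps were already unloaded and destroyed in the loop above, while the ring mutex was held).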
*/ free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; free(tx_ring->push_buf_intermediate_buf, M_DEVBUF); tx_ring->push_buf_intermediate_buf = NULL; } /** * ena_setup_all_tx_resources - allocate all queues Tx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_all_tx_resources(struct ena_adapter *adapter) { int i, rc; for (i = 0; i < adapter->num_io_queues; i++) { rc = ena_setup_tx_resources(adapter, i); if (rc != 0) { ena_log(adapter->pdev, ERR, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } } return (0); err_setup_tx: /* Rewind the index freeing the rings as we go */ while (i--) ena_free_tx_resources(adapter, i); return (rc); } /** * ena_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: network interface device structure * * Free all transmit software resources **/ static void ena_free_all_tx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_tx_resources(adapter, i); } /** * ena_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) { device_t pdev = adapter->pdev; struct ena_que *que = &adapter->que[qid]; struct ena_ring *rx_ring = que->rx_ring; int size, err, i; size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size; #ifdef DEV_NETMAP ena_netmap_reset_rx_ring(adapter, qid); rx_ring->initialized = false; #endif /* DEV_NETMAP */ /* * Alloc extra element so in rx path * we can always prefetch rx_info + 1 */ size += sizeof(struct ena_rx_buffer); rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); size = sizeof(uint16_t) * rx_ring->ring_size; rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK); for (i = 0; i < rx_ring->ring_size; i++) rx_ring->free_rx_ids[i] = i; /* Reset RX statistics. */ ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats, sizeof(rx_ring->rx_stats)); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; /* ... 
and create the buffer DMA maps */ for (i = 0; i < rx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->rx_buf_tag, 0, &(rx_ring->rx_buffer_info[i].map)); if (err != 0) { ena_log(pdev, ERR, "Unable to create Rx DMA map for buffer %d\n", i); goto err_buf_info_unmap; } } /* Create LRO for the ring */ if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n", qid); } else { ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro.ifp = adapter->ifp; } } return (0); err_buf_info_unmap: while (i--) { bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources **/ static void ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; /* Free buffer DMA maps, */ for (int i = 0; i < rx_ring->ring_size; i++) { bus_dmamap_sync(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD); m_freem(rx_ring->rx_buffer_info[i].mbuf); rx_ring->rx_buffer_info[i].mbuf = NULL; bus_dmamap_unload(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } /* free LRO resources, */ tcp_lro_free(&rx_ring->lro); /* free allocated memory */ free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; } /** * ena_setup_all_rx_resources - allocate all queues Rx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. 
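* If setup of any queue fails, the Rx resources of all previously configured queues are freed again before the error is returned.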
**/ static int ena_setup_all_rx_resources(struct ena_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_io_queues; i++) { rc = ena_setup_rx_resources(adapter, i); if (rc != 0) { ena_log(adapter->pdev, ERR, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } } return (0); err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) ena_free_rx_resources(adapter, i); return (rc); } /** * ena_free_all_rx_resources - Free Rx resources for all queues * @adapter: network interface device structure * * Free all receive software resources **/ static void ena_free_all_rx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_rx_resources(adapter, i); } static inline int ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { device_t pdev = adapter->pdev; struct ena_com_buf *ena_buf; bus_dma_segment_t segs[1]; int nsegs, error; int mlen; /* if previous allocated frag is not used */ if (unlikely(rx_info->mbuf != NULL)) return (0); /* Get mbuf using UMA allocator */ rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_ring->rx_mbuf_sz); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1); rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); return (ENOMEM); } mlen = MCLBYTES; } else { mlen = rx_ring->rx_mbuf_sz; } /* Set mbuf length*/ rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; /* Map packets for DMA */ ena_log(pdev, DBG, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len); error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (unlikely((error != 0) || (nsegs != 1))) { ena_log(pdev, WARN, "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs); counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); goto exit; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); ena_buf = &rx_info->ena_buf; ena_buf->paddr = segs[0].ds_addr; ena_buf->len = mlen; ena_log(pdev, DBG, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr); return (0); exit: m_freem(rx_info->mbuf); rx_info->mbuf = NULL; return (EFAULT); } static void ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { if (rx_info->mbuf == NULL) { ena_log(adapter->pdev, WARN, "Trying to free unallocated buffer\n"); return; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map); m_freem(rx_info->mbuf); rx_info->mbuf = NULL; } /** * ena_refill_rx_bufs - Refills ring with descriptors * @rx_ring: the ring which we want to feed with free descriptors * @num: number of descriptors to refill * Refills the ring with newly allocated DMA-mapped mbufs for receiving **/ int ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) { struct ena_adapter *adapter = rx_ring->adapter; device_t pdev = adapter->pdev; uint16_t next_to_use, req_id; uint32_t i; int rc; ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid); next_to_use = rx_ring->next_to_use; for (i = 0; i < num; i++) { struct ena_rx_buffer *rx_info; ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", next_to_use); req_id = rx_ring->free_rx_ids[next_to_use]; rx_info = 
&rx_ring->rx_buffer_info[req_id]; #ifdef DEV_NETMAP if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); else #endif /* DEV_NETMAP */ rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); if (unlikely(rc != 0)) { ena_log_io(pdev, WARN, "failed to alloc buffer for rx queue %d\n", rx_ring->qid); break; } rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, &rx_info->ena_buf, req_id); if (unlikely(rc != 0)) { ena_log_io(pdev, WARN, "failed to add buffer for rx queue %d\n", rx_ring->qid); break; } next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size); } if (unlikely(i < num)) { counter_u64_add(rx_ring->rx_stats.refil_partial, 1); ena_log_io(pdev, WARN, "refilled rx qid %d with only %d mbufs (from %d)\n", rx_ring->qid, i, num); } if (likely(i != 0)) ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); rx_ring->next_to_use = next_to_use; return (i); } #ifdef DEV_NETMAP static int ena_reinit_netmap(struct ena_adapter *adapter) { int rc; netmap_detach(adapter->ifp); rc = ena_netmap_attach(adapter); if (rc != 0) ena_log(adapter->pdev, ERR, "netmap attach failed: %d\n", rc); return rc; } #endif /* DEV_NETMAP */ int ena_update_buf_ring_size(struct ena_adapter *adapter, uint32_t new_buf_ring_size) { uint32_t old_buf_ring_size; int rc = 0; bool dev_was_up; old_buf_ring_size = adapter->buf_ring_size; adapter->buf_ring_size = new_buf_ring_size; dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); ena_down(adapter); /* Reconfigure buf ring for all Tx rings. */ ena_free_all_io_rings_resources(adapter); ena_init_io_rings_advanced(adapter); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ if (dev_was_up) { /* * If ena_up() fails, it's not because of recent buf_ring size * changes. Because of that, we just want to revert old drbr * value and trigger the reset because something else had to * go wrong. */ rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n", new_buf_ring_size, old_buf_ring_size); /* Revert old size and trigger the reset */ adapter->buf_ring_size = old_buf_ring_size; ena_free_all_io_rings_resources(adapter); ena_init_io_rings_advanced(adapter); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); } } return (rc); } int ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, uint32_t new_rx_size) { uint32_t old_tx_size, old_rx_size; int rc = 0; bool dev_was_up; old_tx_size = adapter->requested_tx_ring_size; old_rx_size = adapter->requested_rx_ring_size; adapter->requested_tx_ring_size = new_tx_size; adapter->requested_rx_ring_size = new_rx_size; dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); ena_down(adapter); /* Configure queues with new size. */ ena_init_io_rings_basic(adapter); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ if (dev_was_up) { rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n", new_tx_size, new_rx_size, old_tx_size, old_rx_size); /* Revert old size. 
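* and bring the interface back up with the previous ring sizes; if even that fails, the device reset below is the only remaining recovery.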
*/ adapter->requested_tx_ring_size = old_tx_size; adapter->requested_rx_ring_size = old_rx_size; ena_init_io_rings_basic(adapter); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ /* And try again. */ rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to revert old queue sizes. Triggering device reset.\n"); /* * If we've failed again, something had to go * wrong. After reset, the device should try to * go up */ ENA_FLAG_SET_ATOMIC( ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); } } } return (rc); } static void ena_update_io_rings(struct ena_adapter *adapter, uint32_t num) { ena_free_all_io_rings_resources(adapter); /* Force indirection table to be reinitialized */ ena_com_rss_destroy(adapter->ena_dev); adapter->num_io_queues = num; ena_init_io_rings(adapter); } int ena_update_base_cpu(struct ena_adapter *adapter, int new_num) { int old_num; int rc = 0; bool dev_was_up; dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); old_num = adapter->irq_cpu_base; ena_down(adapter); adapter->irq_cpu_base = new_num; if (dev_was_up) { rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to configure device %d IRQ base CPU. " "Reverting to previous value: %d\n", new_num, old_num); adapter->irq_cpu_base = old_num; rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to revert to previous setup." "Triggering device reset.\n"); ENA_FLAG_SET_ATOMIC( ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); } } } return (rc); } int ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num) { uint32_t old_num; int rc = 0; bool dev_was_up; dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); old_num = adapter->irq_cpu_stride; ena_down(adapter); adapter->irq_cpu_stride = new_num; if (dev_was_up) { rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to configure device %d IRQ CPU stride. " "Reverting to previous value: %d\n", new_num, old_num); adapter->irq_cpu_stride = old_num; rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to revert to previous setup." "Triggering device reset.\n"); ENA_FLAG_SET_ATOMIC( ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); } } } return (rc); } /* Caller should sanitize new_num */ int ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num) { uint32_t old_num; int rc = 0; bool dev_was_up; dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); old_num = adapter->num_io_queues; ena_down(adapter); ena_update_io_rings(adapter, new_num); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ if (dev_was_up) { rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to configure device with %u IO queues. " "Reverting to previous value: %u\n", new_num, old_num); ena_update_io_rings(adapter, old_num); #ifdef DEV_NETMAP rc = ena_reinit_netmap(adapter); if (rc != 0) return rc; #endif /* DEV_NETMAP */ rc = ena_up(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to revert to previous setup IO " "queues. 
Triggering device reset.\n"); ENA_FLAG_SET_ATOMIC( ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); } } } return (rc); } static void ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; unsigned int i; for (i = 0; i < rx_ring->ring_size; i++) { struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; if (rx_info->mbuf != NULL) ena_free_rx_mbuf(adapter, rx_ring, rx_info); #ifdef DEV_NETMAP if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) { if (rx_info->netmap_buf_idx != 0) ena_netmap_free_rx_slot(adapter, rx_ring, rx_info); } #endif /* DEV_NETMAP */ } } /** * ena_refill_all_rx_bufs - allocate all queues Rx buffers * @adapter: network interface device structure * */ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, rc, bufs_num; for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; bufs_num = rx_ring->ring_size - 1; rc = ena_refill_rx_bufs(rx_ring, bufs_num); if (unlikely(rc != bufs_num)) ena_log_io(adapter->pdev, WARN, "refilling Queue %d failed. " "Allocated %d buffers from: %d\n", i, rc, bufs_num); #ifdef DEV_NETMAP rx_ring->initialized = true; #endif /* DEV_NETMAP */ } } static void ena_free_all_rx_bufs(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_rx_bufs(adapter, i); } /** * ena_free_tx_bufs - Free Tx Buffers per Queue * @adapter: network interface device structure * @qid: queue index **/ static void ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) { bool print_once = true; struct ena_ring *tx_ring = &adapter->tx_ring[qid]; ENA_RING_MTX_LOCK(tx_ring); for (int i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; if (tx_info->mbuf == NULL) continue; if (print_once) { ena_log(adapter->pdev, WARN, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); print_once = false; } else { ena_log(adapter->pdev, DBG, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); } bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); m_free(tx_info->mbuf); tx_info->mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); } static void ena_free_all_tx_bufs(struct ena_adapter *adapter) { for (int i = 0; i < adapter->num_io_queues; i++) ena_free_tx_bufs(adapter, i); } static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_io_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_io_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_io_queues(struct ena_adapter *adapter) { struct ena_que *queue; int i; for (i = 0; i < adapter->num_io_queues; i++) { queue = &adapter->que[i]; while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); taskqueue_free(queue->cleanup_tq); } ena_destroy_all_tx_queues(adapter); ena_destroy_all_rx_queues(adapter); } static int ena_create_io_queues(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_com_create_io_ctx ctx; struct ena_ring *ring; struct ena_que 
static int ena_create_io_queues(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_com_create_io_ctx ctx; struct ena_ring *ring; struct ena_que *queue; uint16_t ena_qid; uint32_t msix_vector; cpuset_t *cpu_mask = NULL; int rc, i; /* Create TX queues */ for (i = 0; i < adapter->num_io_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_TXQ_IDX(i); ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.queue_size = adapter->requested_tx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; ctx.numa_node = adapter->que[i].domain; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc != 0) { ena_log(adapter->pdev, ERR, "Failed to create io TX queue #%d rc: %d\n", i, rc); goto err_tx; } ring = &adapter->tx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (rc != 0) { ena_log(adapter->pdev, ERR, "Failed to get TX queue handlers. TX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_tx; } if (ctx.numa_node >= 0) { ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); } } /* Create RX queues */ for (i = 0; i < adapter->num_io_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_RXQ_IDX(i); ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.queue_size = adapter->requested_rx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; ctx.numa_node = adapter->que[i].domain; rc = ena_com_create_io_queue(ena_dev, &ctx); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to create io RX queue[%d] rc: %d\n", i, rc); goto err_rx; } ring = &adapter->rx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Failed to get RX queue handlers. RX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_rx; } if (ctx.numa_node >= 0) { ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); } } for (i = 0; i < adapter->num_io_queues; i++) { queue = &adapter->que[i]; NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); queue->cleanup_tq = taskqueue_create_fast("ena cleanup", M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); #ifdef RSS cpu_mask = &queue->cpu_mask; #endif taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, cpu_mask, "%s queue %d cleanup", device_get_nameunit(adapter->pdev), i); } return (0); err_rx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); i = adapter->num_io_queues; err_tx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); return (ENXIO); } /********************************************************************* * * MSIX & Interrupt Service routine * **********************************************************************/ /** * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue * @arg: network device private data (struct ena_adapter) **/ static void ena_intr_msix_mgmnt(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; ena_com_admin_q_comp_intr_handler(adapter->ena_dev); if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) ena_com_aenq_intr_handler(adapter->ena_dev, arg); } /** * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx * @arg: queue **/ static int ena_handle_msix(void *arg) { struct ena_que *queue = arg; struct ena_adapter *adapter = queue->adapter; if_t ifp = adapter->ifp; if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) return (FILTER_STRAY); taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); return (FILTER_HANDLED); }
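/* * The IO filter above runs in interrupt context and does no work of its own: * it only enqueues the queue's cleanup task, so all completion processing * happens in ena_cleanup() on the taskqueue thread. */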
static int ena_enable_msix(struct ena_adapter *adapter) { device_t dev = adapter->pdev; int msix_vecs, msix_req; int i, rc = 0; if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { ena_log(dev, ERR, "Error, MSI-X is already enabled\n"); return (EINVAL); } /* Reserve the max MSI-X vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), M_DEVBUF, M_WAITOK | M_ZERO); ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); for (i = 0; i < msix_vecs; i++) { adapter->msix_entries[i].entry = i; /* Vectors must start from 1 */ adapter->msix_entries[i].vector = i + 1; } msix_req = msix_vecs; rc = pci_alloc_msix(dev, &msix_vecs); if (unlikely(rc != 0)) { ena_log(dev, ERR, "Failed to enable MSI-X, vectors %d rc %d\n", msix_vecs, rc); rc = ENOSPC; goto err_msix_free; } if (msix_vecs != msix_req) { if (msix_vecs == ENA_ADMIN_MSIX_VEC) { ena_log(dev, ERR, "Not enough MSI-X vectors allocated: %d\n", msix_vecs); pci_release_msi(dev); rc = ENOSPC; goto err_msix_free; } ena_log(dev, ERR, "Enabled only %d MSI-X vectors (out of %d), reduce " "the number of queues\n", msix_vecs, msix_req); } adapter->msix_vecs = msix_vecs; ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); return (0); err_msix_free: free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; return (rc); } static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) { snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); /* * Handler is NULL on purpose, it will be set * when mgmnt interrupt is acquired */ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; } static int ena_setup_io_intr(struct ena_adapter *adapter) { #ifdef RSS int num_buckets = rss_getnumbuckets(); static int last_bind = 0; int cur_bind; int idx; #endif int irq_idx; if (adapter->msix_entries == NULL) return (EINVAL); #ifdef RSS if (adapter->first_bind < 0) { adapter->first_bind = last_bind; last_bind = (last_bind + adapter->num_io_queues) % num_buckets; } cur_bind = adapter->first_bind; #endif for (int i = 0; i < adapter->num_io_queues; i++) { irq_idx = ENA_IO_IRQ_IDX(i); snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); adapter->irq_tbl[irq_idx].handler = ena_handle_msix; adapter->irq_tbl[irq_idx].data = &adapter->que[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n", adapter->msix_entries[irq_idx].vector); if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = (unsigned)(adapter->irq_cpu_base + i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus; CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); } #ifdef RSS adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = rss_getcpu(cur_bind); cur_bind = (cur_bind + 1) % num_buckets; CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); for (idx = 0; idx < MAXMEMDOM; ++idx) { if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx])) break; } adapter->que[i].domain = idx; #else adapter->que[i].domain = -1; #endif } return (0); } static int ena_request_mgmnt_irq(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; struct ena_irq *irq; unsigned long flags; int rc, rcc; flags = RF_ACTIVE | RF_SHAREABLE; irq = 
&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { ena_log(pdev, ERR, "could not allocate irq vector: %d\n", irq->vector); return (ENXIO); } rc = bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, &irq->cookie); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to register interrupt handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err_res_free; } irq->requested = true; return (rc); err_res_free: ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); if (unlikely(rcc != 0)) ena_log(pdev, ERR, "dev has no parent while releasing res for irq: %d\n", irq->vector); irq->res = NULL; return (rc); } static int ena_request_io_irq(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; struct ena_irq *irq; unsigned long flags = 0; int rc = 0, i, rcc; if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { ena_log(pdev, ERR, "failed to request I/O IRQ: MSI-X is not enabled\n"); return (EINVAL); } else { flags = RF_ACTIVE | RF_SHAREABLE; } for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (unlikely(irq->requested)) continue; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { rc = ENOMEM; ena_log(pdev, ERR, "could not allocate irq vector: %d\n", irq->vector); goto err; } rc = bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, &irq->cookie); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to register interrupt handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err; } irq->requested = true; if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to bind interrupt handler for irq %ju to cpu %d: %d\n", rman_get_start(irq->res), irq->cpu, rc); goto err; } ena_log(pdev, INFO, "queue %d - cpu %d\n", i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); } } return (rc); err: for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { irq = &adapter->irq_tbl[i]; rcc = 0; /* Once we entered err: section and irq->requested is true we free both intr and resources */ if (irq->requested) { rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rcc != 0)) ena_log(pdev, ERR, "could not release irq: %d, error: %d\n", irq->vector, rcc); } /* If we entered err: section without irq->requested set we know it was bus_alloc_resource_any() that needs cleanup, provided res is not NULL. 
In case res is NULL, no work is needed in this iteration */ rcc = 0; if (irq->res != NULL) { rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); } if (unlikely(rcc != 0)) ena_log(pdev, ERR, "dev has no parent while releasing res for irq: %d\n", irq->vector); irq->requested = false; irq->res = NULL; } return (rc); } static void ena_free_mgmnt_irq(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; struct ena_irq *irq; int rc; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; if (irq->requested) { ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) ena_log(pdev, ERR, "failed to tear down irq: %d\n", irq->vector); irq->requested = false; } if (irq->res != NULL) { ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) ena_log(pdev, ERR, "dev has no parent while releasing res for irq: %d\n", irq->vector); } } static void ena_free_io_irq(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; struct ena_irq *irq; int rc; for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested) { ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to tear down irq: %d\n", irq->vector); } irq->requested = false; } if (irq->res != NULL) { ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) { ena_log(pdev, ERR, "dev has no parent while releasing res for irq: %d\n", irq->vector); } } } } static void ena_free_irqs(struct ena_adapter *adapter) { ena_free_io_irq(adapter); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); } static void ena_disable_msix(struct ena_adapter *adapter) { if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); pci_release_msi(adapter->pdev); } adapter->msix_vecs = 0; free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; } static void ena_unmask_all_io_irqs(struct ena_adapter *adapter) { struct ena_com_io_cq *io_cq; struct ena_eth_io_intr_reg intr_reg; struct ena_ring *tx_ring; uint16_t ena_qid; int i; /* Unmask interrupts for all queues */ for (i = 0; i < adapter->num_io_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; ena_com_update_intr_reg(&intr_reg, 0, 0, true, false); tx_ring = &adapter->tx_ring[i]; counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); ena_com_unmask_intr(io_cq, &intr_reg); } } static int ena_up_complete(struct ena_adapter *adapter) { int rc; if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { rc = ena_rss_configure(adapter); if (rc != 0) { ena_log(adapter->pdev, ERR, "Failed to configure RSS\n"); return (rc); } } rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp)); if (unlikely(rc != 0)) return (rc); ena_refill_all_rx_bufs(adapter); ena_reset_counters((counter_u64_t *)&adapter->hw_stats, sizeof(adapter->hw_stats)); return (0); } static void set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) { int i; for (i = 0; i < adapter->num_io_queues; i++) { adapter->tx_ring[i].ring_size = new_tx_size; adapter->rx_ring[i].ring_size = new_rx_size; } }
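/* * create_queues_with_size_backoff() below retries queue creation with a size * back-off on ENOMEM: the larger of the two ring sizes (or both, when equal) * is halved and the allocation is retried. As an illustration, starting from * TX=1024/RX=1024, repeated ENOMEM failures would retry with 512/512, 256/256 * and so on, until either the allocation succeeds or a size would drop below * ENA_MIN_RING_SIZE, in which case the error is returned to the caller. */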
static int create_queues_with_size_backoff(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; int rc; uint32_t cur_rx_ring_size, cur_tx_ring_size; uint32_t new_rx_ring_size, new_tx_ring_size; /* * Current queue sizes might be set to smaller than the requested * ones due to past queue allocation failures. */ set_io_rings_size(adapter, adapter->requested_tx_ring_size, adapter->requested_rx_ring_size); while (1) { /* Allocate transmit descriptors */ rc = ena_setup_all_tx_resources(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "err_setup_tx\n"); goto err_setup_tx; } /* Allocate receive descriptors */ rc = ena_setup_all_rx_resources(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "err_setup_rx\n"); goto err_setup_rx; } /* Create IO queues for Rx & Tx */ rc = ena_create_io_queues(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "create IO queues failed\n"); goto err_io_que; } return (0); err_io_que: ena_free_all_rx_resources(adapter); err_setup_rx: ena_free_all_tx_resources(adapter); err_setup_tx: /* * Lower the ring size if ENOMEM. Otherwise, return the * error straightaway. */ if (unlikely(rc != ENOMEM)) { ena_log(pdev, ERR, "Queue creation failed with error code: %d\n", rc); return (rc); } cur_tx_ring_size = adapter->tx_ring[0].ring_size; cur_rx_ring_size = adapter->rx_ring[0].ring_size; ena_log(pdev, ERR, "Not enough memory to create queues with sizes TX=%d, RX=%d\n", cur_tx_ring_size, cur_rx_ring_size); new_tx_ring_size = cur_tx_ring_size; new_rx_ring_size = cur_rx_ring_size; /* * Decrease the size of a larger queue, or decrease both if they * are the same size. */ if (cur_rx_ring_size <= cur_tx_ring_size) new_tx_ring_size = cur_tx_ring_size / 2; if (cur_rx_ring_size >= cur_tx_ring_size) new_rx_ring_size = cur_rx_ring_size / 2; if (new_tx_ring_size < ENA_MIN_RING_SIZE || new_rx_ring_size < ENA_MIN_RING_SIZE) { ena_log(pdev, ERR, "Queue creation failed with the smallest possible queue size " "of %d for both queues. Not retrying with smaller queues\n", ENA_MIN_RING_SIZE); return (rc); } ena_log(pdev, INFO, "Retrying queue creation with sizes TX=%d, RX=%d\n", new_tx_ring_size, new_rx_ring_size); set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size); } }
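/* * ena_up() below is the full bring-up path: set up and request the IO * interrupts, create the IO queues (with the size back-off above), configure * RSS and refill the Rx rings in ena_up_complete(), mark the interface * running and finally unmask all IO interrupts. */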
"ENABLED" : "DISABLED"); rc = create_queues_with_size_backoff(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "error creating queues with size backoff\n"); goto err_create_queues_with_backoff; } if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(adapter->ifp, LINK_STATE_UP); rc = ena_up_complete(adapter); if (unlikely(rc != 0)) goto err_up_complete; counter_u64_add(adapter->dev_stats.interface_up, 1); ena_update_hwassist(adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); ena_unmask_all_io_irqs(adapter); return (0); err_up_complete: ena_destroy_all_io_queues(adapter); ena_free_all_rx_resources(adapter); ena_free_all_tx_resources(adapter); err_create_queues_with_backoff: ena_free_io_irq(adapter); error: return (rc); } static uint64_t ena_get_counter(if_t ifp, ift_counter cnt) { struct ena_adapter *adapter; struct ena_hw_stats *stats; adapter = if_getsoftc(ifp); stats = &adapter->hw_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (counter_u64_fetch(stats->rx_packets)); case IFCOUNTER_OPACKETS: return (counter_u64_fetch(stats->tx_packets)); case IFCOUNTER_IBYTES: return (counter_u64_fetch(stats->rx_bytes)); case IFCOUNTER_OBYTES: return (counter_u64_fetch(stats->tx_bytes)); case IFCOUNTER_IQDROPS: return (counter_u64_fetch(stats->rx_drops)); case IFCOUNTER_OQDROPS: return (counter_u64_fetch(stats->tx_drops)); default: return (if_get_counter_default(ifp, cnt)); } } static int ena_media_change(if_t ifp) { /* Media Change is not supported by firmware */ return (0); } static void ena_media_status(if_t ifp, struct ifmediareq *ifmr) { struct ena_adapter *adapter = if_getsoftc(ifp); ena_log(adapter->pdev, DBG, "Media status update\n"); ENA_LOCK_LOCK(); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { ENA_LOCK_UNLOCK(); ena_log(adapter->pdev, INFO, "Link is down\n"); return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; ENA_LOCK_UNLOCK(); } static void ena_init(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { ENA_LOCK_LOCK(); ena_up(adapter); ENA_LOCK_UNLOCK(); } } static int ena_ioctl(if_t ifp, u_long command, caddr_t data) { struct ena_adapter *adapter; struct ifreq *ifr; int rc; adapter = if_getsoftc(ifp); ifr = (struct ifreq *)data; /* * Acquiring lock to prevent from running up and down routines parallel. 
*/ rc = 0; switch (command) { case SIOCSIFMTU: if (if_getmtu(ifp) == ifr->ifr_mtu) break; ENA_LOCK_LOCK(); ena_down(adapter); ena_change_mtu(ifp, ifr->ifr_mtu); rc = ena_up(adapter); ENA_LOCK_UNLOCK(); break; case SIOCSIFFLAGS: if ((if_getflags(ifp) & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { ena_log(adapter->pdev, INFO, "ioctl promisc/allmulti\n"); } } else { ENA_LOCK_LOCK(); rc = ena_up(adapter); ENA_LOCK_UNLOCK(); } } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { ENA_LOCK_LOCK(); ena_down(adapter); ENA_LOCK_UNLOCK(); } } break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int reinit = 0; if (ifr->ifr_reqcap != if_getcapenable(ifp)) { if_setcapenable(ifp, ifr->ifr_reqcap); reinit = 1; } if ((reinit != 0) && ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { ENA_LOCK_LOCK(); ena_down(adapter); rc = ena_up(adapter); ENA_LOCK_UNLOCK(); } } break; default: rc = ether_ioctl(ifp, command, data); break; } return (rc); } static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) { int caps = 0; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_TXCSUM; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) caps |= IFCAP_TXCSUM_IPV6; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) caps |= IFCAP_TSO4; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) caps |= IFCAP_TSO6; if ((feat->offload.rx_supported & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_RXCSUM; if ((feat->offload.rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) caps |= IFCAP_RXCSUM_IPV6; caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; return (caps); } static void ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) { host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); } static void ena_update_hwassist(struct ena_adapter *adapter) { if_t ifp = adapter->ifp; uint32_t feat = adapter->tx_offload_cap; int cap = if_getcapenable(ifp); int flags = 0; if_clearhwassist(ifp); if ((cap & IFCAP_TXCSUM) != 0) { if ((feat & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) flags |= CSUM_IP; if ((feat & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) flags |= CSUM_IP_UDP | CSUM_IP_TCP; } if ((cap & IFCAP_TXCSUM_IPV6) != 0) flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; if ((cap & IFCAP_TSO4) != 0) flags |= CSUM_IP_TSO; if ((cap & IFCAP_TSO6) != 0) flags |= CSUM_IP6_TSO; if_sethwassistbits(ifp, flags, 0); } static void ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *feat) { if_t ifp; int caps = 0; ifp = adapter->ifp = if_gethandle(IFT_ETHER); if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); if_setdev(ifp, pdev); if_setsoftc(ifp, adapter); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setinitfn(ifp, ena_init); if_settransmitfn(ifp, ena_mq_start); if_setqflushfn(ifp, ena_qflush); if_setioctlfn(ifp, ena_ioctl); 
if_setgetcounterfn(ifp, ena_get_counter); if_setsendqlen(ifp, adapter->requested_tx_ring_size); if_setsendqready(ifp); if_setmtu(ifp, ETHERMTU); if_setbaudrate(ifp, 0); /* Zeroize capabilities... */ if_setcapabilities(ifp, 0); if_setcapenable(ifp, 0); /* check hardware support */ caps = ena_get_dev_offloads(feat); /* ... and set them */ if_setcapabilitiesbit(ifp, caps, 0); /* TSO parameters */ if_sethwtsomax(ifp, ENA_TSO_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1); if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE); if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, ena_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, adapter->mac_addr); } void ena_down(struct ena_adapter *adapter) { int rc; ENA_LOCK_ASSERT(); if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; ena_log(adapter->pdev, INFO, "device is going DOWN\n"); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ena_free_io_irq(adapter); if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); if (unlikely(rc != 0)) ena_log(adapter->pdev, ERR, "Device reset failed\n"); } ena_destroy_all_io_queues(adapter); ena_free_all_tx_bufs(adapter); ena_free_all_rx_bufs(adapter); ena_free_all_tx_resources(adapter); ena_free_all_rx_resources(adapter); counter_u64_add(adapter->dev_stats.interface_down, 1); } static uint32_t ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; /* Regular queues capabilities */ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &get_feat_ctx->max_queue_ext.max_queue_ext; io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, max_queue_ext->max_rx_cq_num); io_tx_sq_num = max_queue_ext->max_tx_sq_num; io_tx_cq_num = max_queue_ext->max_tx_cq_num; } else { struct ena_admin_queue_feature_desc *max_queues = &get_feat_ctx->max_queues; io_tx_sq_num = max_queues->max_sq_num; io_tx_cq_num = max_queues->max_cq_num; io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); } /* In case of LLQ use the llq fields for the tx SQ/CQ */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) io_tx_sq_num = get_feat_ctx->llq.max_llq_num; max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num); max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num); max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num); /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ max_num_io_queues = min_t(uint32_t, max_num_io_queues, pci_msix_count(pdev) - 1); #ifdef RSS max_num_io_queues = min_t(uint32_t, max_num_io_queues, rss_getnumbuckets()); #endif return (max_num_io_queues); } static int ena_enable_wc(device_t pdev, struct resource *res) { #if defined(__i386) || defined(__amd64) || defined(__aarch64__) vm_offset_t va; vm_size_t len; int rc; va = (vm_offset_t)rman_get_virtual(res); len = rman_get_size(res); /* Enable write 
combining */ rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc); return (rc); } return (0); #endif return (EOPNOTSUPP); } static int ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations) { int rc; uint32_t llq_feature_mask; llq_feature_mask = 1 << ENA_ADMIN_LLQ; if (!(ena_dev->supported_features & llq_feature_mask)) { ena_log(pdev, WARN, "LLQ is not supported. Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } if (ena_dev->mem_bar == NULL) { ena_log(pdev, WARN, "LLQ is advertised as supported but device doesn't expose mem bar.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc != 0)) { ena_log(pdev, WARN, "Failed to configure the device mode. " "Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; } return (0); } static int ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) { struct ena_adapter *adapter = device_get_softc(pdev); int rc, rid; /* Try to allocate resources for LLQ bar */ rid = PCIR_BAR(ENA_MEM_BAR); adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->memory == NULL)) { ena_log(pdev, WARN, "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n"); return (0); } /* Enable write combining for better LLQ performance */ rc = ena_enable_wc(adapter->pdev, adapter->memory); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to enable write combining.\n"); return (rc); } /* * Save virtual address of the device's memory region * for the ena_com layer. 
*/ ena_dev->mem_bar = rman_get_virtual(adapter->memory); return (0); } static inline void ena_set_llq_configurations(struct ena_llq_configurations *llq_config, struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter) { llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) { if ((ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) || (ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT && llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; llq_config->llq_ring_entry_size_value = 256; adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B; } } else { llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_config->llq_ring_entry_size_value = 128; adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B; } } static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, struct ena_adapter *adapter) { struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; struct ena_com_dev *ena_dev = ctx->ena_dev; uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE; uint32_t max_tx_queue_size; uint32_t max_rx_queue_size; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; max_rx_queue_size = min_t(uint32_t, max_queue_ext->max_rx_cq_depth, max_queue_ext->max_rx_sq_depth); max_tx_queue_size = max_queue_ext->max_tx_cq_depth; if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, llq->max_llq_depth); else max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, max_queue_ext->max_tx_sq_depth); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_tx_descs); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_rx_descs); } else { struct ena_admin_queue_feature_desc *max_queues = &ctx->get_feat_ctx->max_queues; max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth, max_queues->max_sq_depth); max_tx_queue_size = max_queues->max_cq_depth; if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, llq->max_llq_depth); else max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, max_queues->max_sq_depth); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_tx_descs); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_rx_descs); } if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) { if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { if (llq->max_wide_llq_depth != max_tx_queue_size) { if (llq->max_wide_llq_depth == 0) { /* if there is no large llq max depth from device, we divide * the queue size by 2, leaving the amount of memory * used by the queues unchanged. 
*/ max_tx_queue_size /= 2; } else { max_tx_queue_size = llq->max_wide_llq_depth; } ena_log(ctx->pdev, INFO, "Using large LLQ headers and decreasing maximum Tx queue size to %d\n", max_tx_queue_size); } else { ena_log(ctx->pdev, INFO, "Using large LLQ headers\n"); } } else { ena_log(ctx->pdev, WARN, "Using large headers failed: LLQ is disabled or device does not support large headers\n"); } } /* round down to the nearest power of 2 */ max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1); max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, max_tx_queue_size); rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, max_rx_queue_size); tx_queue_size = 1 << (flsl(tx_queue_size) - 1); rx_queue_size = 1 << (flsl(rx_queue_size) - 1); ctx->max_tx_queue_size = max_tx_queue_size; ctx->max_rx_queue_size = max_rx_queue_size; ctx->tx_queue_size = tx_queue_size; ctx->rx_queue_size = rx_queue_size; return (0); } static void ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) { struct ena_admin_host_info *host_info; uintptr_t rid; int rc; /* Allocate only the host info */ rc = ena_com_allocate_host_info(ena_dev); if (unlikely(rc != 0)) { ena_log(dev, ERR, "Cannot allocate host info\n"); return; } host_info = ena_dev->host_attr.host_info; if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) host_info->bdf = rid; host_info->os_type = ENA_ADMIN_OS_FREEBSD; host_info->kernel_ver = osreldate; sprintf(host_info->kernel_ver_str, "%d", osreldate); host_info->os_dist = 0; strncpy(host_info->os_dist_str, osrelease, sizeof(host_info->os_dist_str) - 1); host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) | (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); host_info->num_cpus = mp_ncpus; host_info->driver_supported_features = ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; rc = ena_com_set_host_attributes(ena_dev); if (unlikely(rc != 0)) { if (rc == EOPNOTSUPP) ena_log(dev, WARN, "Cannot set host attributes\n"); else ena_log(dev, ERR, "Cannot set host attributes\n"); goto err; } return; err: ena_com_delete_host_info(ena_dev); } static int ena_device_init(struct ena_adapter *adapter, device_t pdev, struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) { struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = adapter->ena_dev; bool readless_supported; uint32_t aenq_groups; int dma_width; int rc; rc = ena_com_mmio_reg_read_request_init(ena_dev); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "failed to init mmio read less\n"); return (rc); } /* * The PCIe configuration space revision id indicates whether mmio reg * read is disabled */ readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); ena_com_set_mmio_read_mode(ena_dev, readless_supported); rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Cannot reset device\n"); goto err_mmio_read_less; } rc = ena_com_validate_version(ena_dev); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "device version is too low\n"); goto err_mmio_read_less; } dma_width = ena_com_get_dma_width(ena_dev); if (unlikely(dma_width < 0)) { ena_log(pdev, ERR, "Invalid dma width value %d\n", dma_width); rc = dma_width; goto err_mmio_read_less; } adapter->dma_width = dma_width; /* ENA admin level init */ rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Cannot initialize 
ena admin queue with device\n"); goto err_mmio_read_less; } /* * To enable the msix interrupts the driver needs to know the number * of queues. So the driver uses polling mode to retrieve this * information */ ena_com_set_admin_polling_mode(ena_dev, true); ena_config_host_info(ena_dev, pdev); /* Get Device Attributes */ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Cannot get attribute for ena device rc: %d\n", rc); goto err_admin_init; } aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_FATAL_ERROR) | BIT(ENA_ADMIN_WARNING) | BIT(ENA_ADMIN_NOTIFICATION) | BIT(ENA_ADMIN_KEEP_ALIVE) | BIT(ENA_ADMIN_CONF_NOTIFICATIONS) | BIT(ENA_ADMIN_DEVICE_REQUEST_RESET); aenq_groups &= get_feat_ctx->aenq.supported_groups; rc = ena_com_set_aenq_config(ena_dev, aenq_groups); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc); goto err_admin_init; } *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter); rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, &llq_config); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Failed to set placement policy\n"); goto err_admin_init; } return (0); err_admin_init: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); return (rc); } static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; rc = ena_enable_msix(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); return (rc); } ena_setup_mgmnt_intr(adapter); rc = ena_request_mgmnt_irq(adapter); if (unlikely(rc != 0)) { ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n"); goto err_disable_msix; } ena_com_set_admin_polling_mode(ena_dev, false); ena_com_admin_aenq_enable(ena_dev); return (0); err_disable_msix: ena_disable_msix(adapter); return (rc); } /* Function called on ENA_ADMIN_KEEP_ALIVE event */ static void ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_keep_alive_desc *desc; sbintime_t stime; uint64_t rx_drops; uint64_t tx_drops; desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; counter_u64_zero(adapter->hw_stats.rx_drops); counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); counter_u64_zero(adapter->hw_stats.tx_drops); counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); stime = getsbinuptime(); atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); } /* Check for keep alive expiration */ static void check_for_missing_keep_alive(struct ena_adapter *adapter) { sbintime_t timestamp, time; enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; if (adapter->wd_active == 0) return; if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); time = getsbinuptime() - timestamp; if (unlikely(time > adapter->keep_alive_timeout)) { ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); if (ena_com_aenq_has_keep_alive(adapter->ena_dev)) reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; ena_trigger_reset(adapter, reset_reason); } } /* Check if admin queue 
is enabled */ static void check_for_admin_com_state(struct ena_adapter *adapter) { enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO; if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { ena_log(adapter->pdev, ERR, "ENA admin queue is not in running state!\n"); counter_u64_add(adapter->dev_stats.admin_q_pause, 1); if (ena_com_get_missing_admin_interrupt(adapter->ena_dev)) reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; ena_trigger_reset(adapter, reset_reason); } } static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, struct ena_ring *rx_ring) { if (likely(atomic_load_8(&rx_ring->first_interrupt))) return (0); if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) return (0); rx_ring->no_interrupt_event_cnt++; if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { ena_log(adapter->pdev, ERR, "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", rx_ring->qid); ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return (EIO); } return (0); } static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *adapter, struct ena_ring *tx_ring) { device_t pdev = adapter->pdev; int rc; u16 req_id; rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); /* TX CQ is empty */ if (rc == ENA_COM_TRY_AGAIN) { ena_log(pdev, ERR, "No completion descriptors found in CQ %d\n", tx_ring->qid); return (ENA_REGS_RESET_MISS_TX_CMPL); } /* TX CQ has cdescs */ ena_log(pdev, ERR, "Completion descriptors found in CQ %d\n", tx_ring->qid); return (ENA_REGS_RESET_MISS_INTERRUPT); } static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { uint32_t missed_tx = 0, new_missed_tx = 0; device_t pdev = adapter->pdev; struct bintime curtime, time; struct ena_tx_buffer *tx_buf; int time_since_last_cleanup; int missing_tx_comp_to; sbintime_t time_offset; int i, rc = 0; enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; bool cleanup_scheduled, cleanup_running; getbinuptime(&curtime); for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; if (bintime_isset(&tx_buf->timestamp) == 0) continue; time = curtime; bintime_sub(&time, &tx_buf->timestamp); time_offset = bttosbt(time); if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && time_offset > 2 * adapter->missing_tx_timeout)) { /* * If the interrupt is still not received after the * grace period, we schedule a reset. */ ena_log(pdev, ERR, "Potential MSIX issue on Tx side Queue = %d. " "Reset the device\n", tx_ring->qid); ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return (EIO); } /* Check again if packet is still waiting */ if (unlikely(time_offset > adapter->missing_tx_timeout)) { if (tx_buf->print_once) { time_since_last_cleanup = TICKS_2_MSEC(ticks - tx_ring->tx_last_cleanup_ticks); missing_tx_comp_to = sbttoms( adapter->missing_tx_timeout); ena_log(pdev, WARN, "Found a Tx that wasn't completed on time, qid %d, index %d. " "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n", tx_ring->qid, i, time_since_last_cleanup, missing_tx_comp_to); /* Count the newly detected missing TX completions */ new_missed_tx++; } tx_buf->print_once = false; missed_tx++; } } /* Check if the missing TX completions on this ring have passed the threshold */ if (unlikely(missed_tx > adapter->missing_tx_threshold)) { ena_log(pdev, ERR, "The number of lost tx completions is above the threshold " "(%d > %d). 
Reset the device\n", missed_tx, adapter->missing_tx_threshold); /* Set the reset flag to prevent ena_cleanup() from running */ ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); /* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and * that cleanup_running is visible to check_missing_comp_in_tx_queue() to * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq() */ mb(); cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending)); cleanup_running = !!(atomic_load_8(&tx_ring->cleanup_running)); if (!(cleanup_scheduled || cleanup_running)) reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring); adapter->reset_reason = reset_reason; rc = EIO; } /* Add the newly discovered missing TX completions */ counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx); return (rc); } /* * Check for TX which were not completed on time. * Timeout is defined by "missing_tx_timeout". * Reset will be performed if the number of incomplete * transactions exceeds "missing_tx_threshold". */ static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; struct ena_ring *rx_ring; int i, budget, rc; /* Make sure the driver isn't turning the device off in another process */ rmb(); if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; budget = adapter->missing_tx_max_queues; for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) { tx_ring = &adapter->tx_ring[i]; rx_ring = &adapter->rx_ring[i]; rc = check_missing_comp_in_tx_queue(adapter, tx_ring); if (unlikely(rc != 0)) return; rc = check_for_rx_interrupt_queue(adapter, rx_ring); if (unlikely(rc != 0)) return; budget--; if (budget == 0) { i++; break; } } adapter->next_monitored_tx_qid = i % adapter->num_io_queues; }
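/* * Note that the check above walks at most missing_tx_max_queues rings per * invocation and resumes from next_monitored_tx_qid on the next timer tick, * so all rings are eventually covered round-robin. */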
/* trigger rx cleanup after 2 consecutive detections */ #define EMPTY_RX_REFILL 2 /* For the rare case where the device runs out of Rx descriptors and the * msix handler failed to refill new Rx descriptors (due to a lack of memory * for example). * This case will lead to a deadlock: * The device won't send interrupts since all the new Rx packets will be dropped * The msix handler won't allocate new Rx descriptors so the device won't be * able to receive new packets. * * When such a situation is detected, execute the rx cleanup task in another thread */ static void check_for_empty_rx_ring(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, refill_required; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; refill_required = ena_com_free_q_entries( rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 1); ena_log(adapter->pdev, WARN, "Rx ring %d is stalled. Triggering the refill function\n", i); taskqueue_enqueue(rx_ring->que->cleanup_tq, &rx_ring->que->cleanup_task); rx_ring->empty_rx_queue = 0; } } else { rx_ring->empty_rx_queue = 0; } } } static void ena_update_hints(struct ena_adapter *adapter, struct ena_admin_ena_hw_hints *hints) { struct ena_com_dev *ena_dev = adapter->ena_dev; if (hints->admin_completion_tx_timeout) ena_dev->admin_queue.completion_timeout = hints->admin_completion_tx_timeout * 1000; if (hints->mmio_read_timeout) /* convert to usec */ ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; if (hints->missed_tx_completion_count_threshold_to_reset) adapter->missing_tx_threshold = hints->missed_tx_completion_count_threshold_to_reset; if (hints->missing_tx_completion_timeout) { if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->missing_tx_timeout = SBT_1MS * hints->missing_tx_completion_timeout; } if (hints->driver_watchdog_timeout) { if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->keep_alive_timeout = SBT_1MS * hints->driver_watchdog_timeout; } } /** * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. * @adapter: ENA device adapter * * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics * and other error codes on failure. * * This function can possibly cause a race with other calls to the admin queue. * Because of that, the caller should either lock this function or make sure * that there is no race in the current context. */ static int ena_copy_eni_metrics(struct ena_adapter *adapter) { static bool print_once = true; int rc; rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics); if (rc != 0) { if (rc == ENA_COM_UNSUPPORTED) { if (print_once) { ena_log(adapter->pdev, WARN, "Retrieving ENI metrics is not supported.\n"); print_once = false; } else { ena_log(adapter->pdev, DBG, "Retrieving ENI metrics is not supported.\n"); } } else { ena_log(adapter->pdev, ERR, "Failed to get ENI metrics: %d\n", rc); } } return (rc); } static int ena_copy_srd_metrics(struct ena_adapter *adapter) { return (ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info)); } static int ena_copy_customer_metrics(struct ena_adapter *adapter) { struct ena_com_dev *dev; u32 supported_metrics_count; int rc, len; dev = adapter->ena_dev; supported_metrics_count = ena_com_get_customer_metric_count(dev); len = supported_metrics_count * sizeof(u64); /* Fill the data buffer */ rc = ena_com_get_customer_metrics(adapter->ena_dev, (char *)(adapter->customer_metrics_array), len); return (rc); }
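/* * ena_timer_service() below is the driver's once-per-second watchdog: it * checks keep-alive expiration, admin queue health, missing TX completions * and stalled Rx rings, periodically refreshes host info and metrics, and * schedules the reset task when ENA_FLAG_TRIGGER_RESET gets set. */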
static void ena_timer_service(void *data) { struct ena_adapter *adapter = (struct ena_adapter *)data; struct ena_admin_host_info *host_info = adapter->ena_dev->host_attr.host_info; check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); /* * User-controlled update of the ENA metrics. * If the delay was set to 0, then the stats shouldn't be updated at * all. * Otherwise, wait 'metrics_sample_interval' seconds, before * updating stats. * As the timer service is executed every second, it's enough to * increment the appropriate counter each time the timer service is * executed. */ if ((adapter->metrics_sample_interval != 0) && (++adapter->metrics_sample_interval_cnt >= adapter->metrics_sample_interval)) { taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task); adapter->metrics_sample_interval_cnt = 0; } if (host_info != NULL) ena_update_host_info(host_info, adapter->ifp); if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { /* * Timeout when validating version indicates that the device * became unresponsive. If that happens, skip the reset and * reschedule the timer service, so the reset can be retried later. */ if (ena_com_validate_version(adapter->ena_dev) == ENA_COM_TIMER_EXPIRED) { ena_log(adapter->pdev, WARN, "FW unresponsive, skipping reset\n"); ENA_TIMER_RESET(adapter); return; } ena_log(adapter->pdev, WARN, "Trigger reset is on\n"); taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); return; } /* * Schedule another timeout one second from now. */ ENA_TIMER_RESET(adapter); } void ena_destroy_device(struct ena_adapter *adapter, bool graceful) { if_t ifp = adapter->ifp; struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) return; if (!graceful) if_link_state_change(ifp, LINK_STATE_DOWN); ENA_TIMER_DRAIN(adapter); dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); if (dev_up) ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) ena_down(adapter); /* * Stop the device from sending AENQ events (if the device was up, and * the trigger reset was on, ena_down already performs device reset) */ if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); /* * IO rings resources should be freed because `ena_restore_device()` * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX * vectors. The number of MSIX vectors after destroy-restore may be * different than before. Therefore, IO rings resources should be * established from scratch each time. */ ena_free_all_io_rings_resources(adapter); ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); adapter->reset_reason = ENA_REGS_RESET_NORMAL; ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); } static int ena_device_validate_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, ETHER_ADDR_LEN) != 0) { ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); return (EINVAL); } if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { ena_log(adapter->pdev, ERR, "Error, device max mtu is smaller than ifp MTU\n"); return (EINVAL); } return (0); } int ena_restore_device(struct ena_adapter *adapter) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = adapter->ena_dev; if_t ifp = adapter->ifp; device_t dev = adapter->pdev; int wd_active; int rc; ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); if (rc != 0) { ena_log(dev, ERR, "Cannot initialize device\n"); goto err; } /* * Only enable WD if it was enabled before reset, so it won't override * the value set by the user via the sysctl. 
*/ if (adapter->wd_active != 0) adapter->wd_active = wd_active; rc = ena_device_validate_params(adapter, &get_feat_ctx); if (rc != 0) { ena_log(dev, ERR, "Validation of device parameters failed\n"); goto err_device_destroy; } ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); /* Make sure we don't have a race with the AENQ link state handler */ if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(ifp, LINK_STATE_UP); rc = ena_enable_msix_and_set_admin_interrupts(adapter); if (rc != 0) { ena_log(dev, ERR, "Enable MSI-X failed\n"); goto err_device_destroy; } /* * Effective value of used MSIX vectors should be the same as before * `ena_destroy_device()`, if possible, or closest to it if fewer * vectors are available. */ if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues) adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; /* Re-initialize rings basic information */ ena_init_io_rings(adapter); /* If the interface was up before the reset, bring it up */ if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { rc = ena_up(adapter); if (rc != 0) { ena_log(dev, ERR, "Failed to create I/O queues\n"); goto err_disable_msix; } } /* Indicate that device is running again and ready to work */ ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); /* * As the AENQ handlers weren't executed during reset because * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the * timestamp must be updated again. That will prevent the next reset * caused by a missing keep alive. */ adapter->keep_alive_timestamp = getsbinuptime(); ENA_TIMER_RESET(adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); return (rc); err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); ena_com_mmio_reg_read_request_destroy(ena_dev); err: ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); ena_log(dev, ERR, "Reset attempt failed. Cannot reset the device\n"); return (rc); }
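/* * The two taskqueue handlers below run outside the timer callout: * ena_metrics_task() collects whichever metrics set the device advertises * (customer metrics, ENI stats, SRD info), while ena_reset_task() recovers * the device by calling ena_destroy_device() followed by * ena_restore_device() under the global ENA lock. */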
static void ena_metrics_task(void *arg, int pending) { struct ena_adapter *adapter = (struct ena_adapter *)arg; ENA_LOCK_LOCK(); if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) (void)ena_copy_customer_metrics(adapter); else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS)) (void)ena_copy_eni_metrics(adapter); if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) (void)ena_copy_srd_metrics(adapter); ENA_LOCK_UNLOCK(); } static void ena_reset_task(void *arg, int pending) { struct ena_adapter *adapter = (struct ena_adapter *)arg; ENA_LOCK_LOCK(); if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { ena_increment_reset_counter(adapter); ena_destroy_device(adapter, false); ena_restore_device(adapter); ena_log(adapter->pdev, INFO, "Device reset completed successfully. Driver info: %s\n", ena_version); } ENA_LOCK_UNLOCK(); } static void ena_free_stats(struct ena_adapter *adapter) { ena_free_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_free_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); } /** * ena_attach - Device Initialization Routine * @pdev: device information struct * * Returns 0 on success, an error code otherwise. * * ena_attach initializes an adapter identified by a device structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int ena_attach(device_t pdev) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; static int version_printed; struct ena_adapter *adapter; struct ena_com_dev *ena_dev = NULL; uint32_t max_num_io_queues; int msix_rid; int rid, rc; adapter = device_get_softc(pdev); adapter->pdev = pdev; adapter->first_bind = -1; /* * Set up the timer service - the driver is responsible for avoiding * concurrency, as the callout won't be using any locking inside. */ ENA_TIMER_INIT(adapter); adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO; adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO; adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES; adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD; adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED; adapter->irq_cpu_stride = 0; #ifdef RSS adapter->rss_enabled = 1; #endif if (version_printed++ == 0) ena_log(pdev, INFO, "%s\n", ena_version); /* Allocate memory for ena_dev structure */ ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, M_WAITOK | M_ZERO); adapter->ena_dev = ena_dev; ena_dev->dmadev = pdev; rid = PCIR_BAR(ENA_REG_BAR); adapter->memory = NULL; adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->registers == NULL)) { ena_log(pdev, ERR, "unable to allocate bus resource: registers!\n"); rc = ENOMEM; goto err_dev_free; } /* The MSI-X vector table may reside on BAR0 with registers or on BAR1. */ msix_rid = pci_msix_table_bar(pdev); if (msix_rid != rid) { adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &msix_rid, RF_ACTIVE); if (unlikely(adapter->msix == NULL)) { ena_log(pdev, ERR, "unable to allocate bus resource: msix!\n"); rc = ENOMEM; goto err_pci_free; } adapter->msix_rid = msix_rid; } ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, M_WAITOK | M_ZERO); /* Store register resources */ ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag( adapter->registers); ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle( adapter->registers); if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) { ena_log(pdev, ERR, "failed to map registers bar\n"); rc = ENXIO; goto err_bus_free; } rc = ena_map_llq_mem_bar(pdev, ena_dev); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Failed to map ENA mem bar\n"); goto err_bus_free; } ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; /* Initially clear all the flags */ ENA_FLAG_ZERO(adapter); /* Device initialization */ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "ENA device init failed! 
(err: %d)\n", rc); rc = ENXIO; goto err_bus_free; } if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) adapter->disable_meta_caching = !!( get_feat_ctx.llq.accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_DISABLE_META_CACHING)); adapter->keep_alive_timestamp = getsbinuptime(); adapter->tx_offload_cap = get_feat_ctx.offload.tx; memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, ETHER_ADDR_LEN); calc_queue_ctx.pdev = pdev; calc_queue_ctx.ena_dev = ena_dev; calc_queue_ctx.get_feat_ctx = &get_feat_ctx; /* Calculate initial and maximum IO queue number and size */ max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter); if (unlikely((rc != 0) || (max_num_io_queues <= 0))) { rc = EFAULT; goto err_com_free; } adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; adapter->max_num_io_queues = max_num_io_queues; adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; adapter->reset_reason = ENA_REGS_RESET_NORMAL; /* set up dma tags for rx and tx buffers */ rc = ena_setup_tx_dma_tag(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Failed to create TX DMA tag\n"); goto err_com_free; } rc = ena_setup_rx_dma_tag(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Failed to create RX DMA tag\n"); goto err_tx_tag_free; } /* * The amount of requested MSIX vectors is equal to * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant * number of admin queue interrupts. The former is initially determined * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be * achieved if there are not enough system resources. By default, the * number of effectively used IO queues is the same but later on it can * be limited by the user using sysctl interface. 
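 *
 * As an illustrative example (numbers assumed, not taken from any
 * particular instance type): with max_num_io_queues == 8 the driver asks
 * for 8 + ENA_ADMIN_MSIX_VEC vectors; if the system grants only 5, the
 * effective number of IO queues becomes 5 - ENA_ADMIN_MSIX_VEC. This is
 * the same `adapter->msix_vecs - ENA_ADMIN_MSIX_VEC` clamping that
 * ena_restore_device() applies after a reset.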
*/ rc = ena_enable_msix_and_set_admin_interrupts(adapter); if (unlikely(rc != 0)) { ena_log(pdev, ERR, "Failed to enable and set the admin interrupts\n"); goto err_io_free; } /* By default all of allocated MSIX vectors are actively used */ adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; /* initialize rings basic information */ ena_init_io_rings(adapter); rc = ena_com_allocate_customer_metrics_buffer(ena_dev); if (rc) { ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n"); goto err_msix_free; } rc = ena_sysctl_allocate_customer_metrics_buffer(adapter); if (unlikely(rc)){ ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n"); goto err_metrics_buffer_destroy; } /* Initialize statistics */ ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_sysctl_add_nodes(adapter); /* setup network interface */ ena_setup_ifnet(pdev, adapter, &get_feat_ctx); /* Initialize reset task queue */ TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); adapter->reset_tq = taskqueue_create("ena_reset_enqueue", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", device_get_nameunit(adapter->pdev)); /* Initialize metrics task queue */ TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter); adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq); taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq", device_get_nameunit(adapter->pdev)); #ifdef DEV_NETMAP rc = ena_netmap_attach(adapter); if (rc != 0) { ena_log(pdev, ERR, "netmap attach failed: %d\n", rc); goto err_detach; } #endif /* DEV_NETMAP */ /* Tell the stack that the interface is not active */ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); /* Run the timer service */ ENA_TIMER_RESET(adapter); return (0); #ifdef DEV_NETMAP err_detach: ether_ifdetach(adapter->ifp); ifmedia_removeall(&adapter->media); free(adapter->customer_metrics_array, M_DEVBUF); #endif /* DEV_NETMAP */ err_metrics_buffer_destroy: ena_com_delete_customer_metrics_buffer(ena_dev); err_msix_free: ena_free_stats(adapter); ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_io_free: ena_free_all_io_rings_resources(adapter); ena_free_rx_dma_tag(adapter); err_tx_tag_free: ena_free_tx_dma_tag(adapter); err_com_free: ena_com_admin_destroy(ena_dev); ena_com_delete_host_info(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); err_bus_free: free(ena_dev->bus, M_DEVBUF); err_pci_free: ena_free_pci_resources(adapter); err_dev_free: free(ena_dev, M_DEVBUF); return (rc); } /** * ena_detach - Device Removal Routine * @pdev: device information struct * * ena_detach is called by the device subsystem to alert the driver * that it should release a PCI device. 
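 *
 * bus_generic_detach() is called up front so that child devices detach
 * first (a non-zero return from a child aborts the whole detach) before
 * the driver frees the taskqueues, DMA tags, and PCI resources it still
 * owns.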
**/ static int ena_detach(device_t pdev) { struct ena_adapter *adapter = device_get_softc(pdev); struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* Make sure VLANS are not using driver */ if (if_vlantrunkinuse(adapter->ifp)) { ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n"); return (EBUSY); } + rc = bus_generic_detach(pdev); + if (rc != 0) + return (rc); + ether_ifdetach(adapter->ifp); ifmedia_removeall(&adapter->media); /* Stop timer service */ ENA_LOCK_LOCK(); ENA_TIMER_DRAIN(adapter); ENA_LOCK_UNLOCK(); /* Release metrics task */ while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL)) taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task); taskqueue_free(adapter->metrics_tq); /* Release reset task */ while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) taskqueue_drain(adapter->reset_tq, &adapter->reset_task); taskqueue_free(adapter->reset_tq); ENA_LOCK_LOCK(); ena_down(adapter); ena_destroy_device(adapter, true); ENA_LOCK_UNLOCK(); /* Restore unregistered sysctl queue nodes. */ ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues, adapter->max_num_io_queues); #ifdef DEV_NETMAP netmap_detach(adapter->ifp); #endif /* DEV_NETMAP */ ena_free_stats(adapter); rc = ena_free_rx_dma_tag(adapter); if (unlikely(rc != 0)) ena_log(adapter->pdev, WARN, "Unmapped RX DMA tag associations\n"); rc = ena_free_tx_dma_tag(adapter); if (unlikely(rc != 0)) ena_log(adapter->pdev, WARN, "Unmapped TX DMA tag associations\n"); ena_free_irqs(adapter); ena_free_pci_resources(adapter); if (adapter->rss_indir != NULL) free(adapter->rss_indir, M_DEVBUF); if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) ena_com_rss_destroy(ena_dev); ena_com_delete_host_info(ena_dev); free(adapter->customer_metrics_array, M_DEVBUF); ena_com_delete_customer_metrics_buffer(ena_dev); if_free(adapter->ifp); free(ena_dev->bus, M_DEVBUF); free(ena_dev, M_DEVBUF); - return (bus_generic_detach(pdev)); + return (0); } /****************************************************************************** ******************************** AENQ Handlers ******************************* *****************************************************************************/ /** * ena_update_on_link_change: * Notify the network interface about the change in link status **/ static void ena_update_on_link_change(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_link_change_desc *aenq_desc; int status; if_t ifp; aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; ifp = adapter->ifp; status = aenq_desc->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; if (status != 0) { ena_log(adapter->pdev, INFO, "link is UP\n"); ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) if_link_state_change(ifp, LINK_STATE_UP); } else { ena_log(adapter->pdev, INFO, "link is DOWN\n"); if_link_state_change(ifp, LINK_STATE_DOWN); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); } } static void ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_ena_hw_hints *hints; ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, adapter->ena_dev, "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); switch (aenq_e->aenq_common_desc.syndrome) { case ENA_ADMIN_UPDATE_HINTS: hints = (struct 
ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); ena_update_hints(adapter, hints); break; default: ena_log(adapter->pdev, ERR, "Invalid AENQ notification syndrome %d\n", aenq_e->aenq_common_desc.syndrome); } } static void ena_lock_init(void *arg) { ENA_LOCK_INIT(); } SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL); static void ena_lock_uninit(void *arg) { ENA_LOCK_DESTROY(); } SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL); /** * This handler will be called for an unknown event group or for events * with unimplemented handlers. **/ static void unimplemented_aenq_handler(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; ena_log(adapter->pdev, ERR, "Unknown event was received or event with unimplemented handler\n"); } static void ena_conf_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_conf_notifications_desc *desc; u64 bitmap, bit; desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e; bitmap = desc->notifications_bitmap; if (bitmap == 0) { ena_log(adapter->pdev, INFO, "Empty configuration notification bitmap\n"); return; } for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) { bit--; ena_log(adapter->pdev, INFO, "Sub-optimal configuration notification code: %" PRIu64 ". Refer to AWS ENA documentation for additional details and mitigation options.\n", bit + 1); /* Clear the processed bit */ bitmap &= ~(1UL << bit); } } static void ena_admin_device_request_reset(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; ena_log(adapter->pdev, WARN, "The device has detected an unhealthy state, reset is requested\n"); ena_trigger_reset(adapter, ENA_REGS_RESET_DEVICE_REQUEST); } static struct ena_aenq_handlers aenq_handlers = { .handlers = { [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, [ENA_ADMIN_NOTIFICATION] = ena_notification, [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification, [ENA_ADMIN_DEVICE_REQUEST_RESET] = ena_admin_device_request_reset, }, .unimplemented_handler = unimplemented_aenq_handler }; /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ena_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ena_probe), DEVMETHOD(device_attach, ena_attach), DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END }; static driver_t ena_driver = { "ena", ena_methods, sizeof(struct ena_adapter), }; DRIVER_MODULE(ena, pci, ena_driver, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, nitems(ena_vendor_info_array) - 1); MODULE_DEPEND(ena, pci, 1, 1, 1); MODULE_DEPEND(ena, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(ena, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /*********************************************************************/ diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index 37db238f2108..0f41d3433d75 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -1,533 +1,538 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* * Bus interface. */ static int simplebus_probe(device_t dev); static struct resource *simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static void simplebus_probe_nomatch(device_t bus, device_t child); static int simplebus_print_child(device_t bus, device_t child); static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit); static struct resource_list *simplebus_get_resource_list(device_t bus, device_t child); static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); /* * ofw_bus interface */ static const struct ofw_bus_devinfo *simplebus_get_devinfo(device_t bus, device_t child); /* * Driver methods. 
*/ static device_method_t simplebus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, simplebus_probe), DEVMETHOD(device_attach, simplebus_attach), DEVMETHOD(device_detach, simplebus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, simplebus_add_child), DEVMETHOD(bus_print_child, simplebus_print_child), DEVMETHOD(bus_probe_nomatch, simplebus_probe_nomatch), DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), DEVMETHOD(bus_write_ivar, bus_generic_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, simplebus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_map_resource, bus_generic_map_resource), DEVMETHOD(bus_unmap_resource, bus_generic_unmap_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_get_resource_list, simplebus_get_resource_list), DEVMETHOD(bus_get_property, simplebus_get_property), DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_0(simplebus, simplebus_driver, simplebus_methods, sizeof(struct simplebus_softc)); EARLY_DRIVER_MODULE(simplebus, ofwbus, simplebus_driver, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(simplebus, simplebus, simplebus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int simplebus_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); /* * XXX We should attach only to a pure 'compatible = "simple-bus"' node, * without any other compatible string. * For now, filter only the known cases: * "syscon", "simple-bus"; is handled by the fdt/syscon driver * "simple-mfd", "simple-bus"; is handled by the fdt/simple-mfd driver */ if (ofw_bus_is_compatible(dev, "syscon") || ofw_bus_is_compatible(dev, "simple-mfd")) return (ENXIO); /* * FDT data puts a "simple-bus" compatible string on many things that * have children but aren't really buses in our world. Without a * ranges property we will fail to attach, so just fail to probe too. 
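 *
 * An illustrative node that does probe as a simplebus (the values are
 * made up for the example):
 *
 *	soc {
 *		compatible = "simple-bus";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0x0 0x30000000 0x100000>;
 *	};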
*/ if (!(ofw_bus_is_compatible(dev, "simple-bus") && ofw_bus_has_prop(dev, "ranges")) && (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "soc") != 0)) return (ENXIO); device_set_desc(dev, "Flattened device tree simple bus"); return (BUS_PROBE_GENERIC); } int simplebus_attach_impl(device_t dev) { struct simplebus_softc *sc; phandle_t node; sc = device_get_softc(dev); simplebus_init(dev, 0); if ((sc->flags & SB_FLAG_NO_RANGES) == 0 && simplebus_fill_ranges(sc->node, sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } /* * In principle, simplebus could have an interrupt map, but ignore that * for now */ for (node = OF_child(sc->node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (0); } int simplebus_attach(device_t dev) { int rv; rv = simplebus_attach_impl(dev); if (rv != 0) return (rv); return (bus_generic_attach(dev)); } int simplebus_detach(device_t dev) { struct simplebus_softc *sc; + int rv; + + rv = bus_generic_detach(dev); + if (rv != 0) + return (rv); sc = device_get_softc(dev); if (sc->ranges != NULL) free(sc->ranges, M_DEVBUF); - return (bus_generic_detach(dev)); + return (0); } void simplebus_init(device_t dev, phandle_t node) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (node == 0) node = ofw_bus_get_node(dev); sc->dev = dev; sc->node = node; /* * Some important numbers */ sc->acells = 2; OF_getencprop(node, "#address-cells", &sc->acells, sizeof(sc->acells)); sc->scells = 1; OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells)); } int simplebus_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } struct simplebus_devinfo * simplebus_setup_dinfo(device_t dev, phandle_t node, struct simplebus_devinfo *di) { struct simplebus_softc *sc; struct simplebus_devinfo *ndi; sc = device_get_softc(dev); if (di == NULL) ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); else ndi = di; if (ofw_bus_gen_setup_devinfo(&ndi->obdinfo, node) != 0) { if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } resource_list_init(&ndi->rl); ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells, &ndi->rl); ofw_bus_intr_to_rl(dev, node, &ndi->rl, NULL); return (ndi); } device_t simplebus_add_device(device_t dev, phandle_t node, u_int order, const char *name, int unit, struct simplebus_devinfo *di) { struct simplebus_devinfo *ndi; device_t cdev; if ((ndi = simplebus_setup_dinfo(dev, node, di)) == 
NULL) return (NULL); cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", ndi->obdinfo.obd_name); resource_list_free(&ndi->rl); ofw_bus_gen_destroy_devinfo(&ndi->obdinfo); if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } device_set_ivars(cdev, ndi); return(cdev); } static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t cdev; struct simplebus_devinfo *ndi; cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) return (NULL); ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); ndi->obdinfo.obd_node = -1; resource_list_init(&ndi->rl); device_set_ivars(cdev, ndi); return (cdev); } static const struct ofw_bus_devinfo * simplebus_get_devinfo(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->obdinfo); } static struct resource_list * simplebus_get_resource_list(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->rl); } static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { phandle_t node, xref; ssize_t ret, i; uint32_t *buffer; uint64_t val; switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: case DEVICE_PROP_HANDLE: break; default: return (-1); } node = ofw_bus_get_node(child); if (propvalue == NULL || size == 0) return (OF_getproplen(node, propname)); /* * Integer values are stored in BE format. * If caller declared that the underlying property type is uint32_t * we need to do the conversion to match host endianness. */ if (type == DEVICE_PROP_UINT32) return (OF_getencprop(node, propname, propvalue, size)); /* * uint64_t also requires endianness handling. * In FDT every 8 byte value is stored using two uint32_t variables * in BE format. Now, since the upper bits are stored as the first * of the pair, both halves require swapping. */ if (type == DEVICE_PROP_UINT64) { ret = OF_getencprop(node, propname, propvalue, size); if (ret <= 0) { return (ret); } buffer = (uint32_t *)propvalue; for (i = 0; i < size / 4; i += 2) { val = (uint64_t)buffer[i] << 32 | buffer[i + 1]; ((uint64_t *)buffer)[i / 2] = val; } return (ret); } if (type == DEVICE_PROP_HANDLE) { if (size < sizeof(node)) return (-1); ret = OF_getencprop(node, propname, &xref, sizeof(xref)); if (ret <= 0) return (ret); node = OF_node_from_xref(xref); if (propvalue != NULL) *(uint32_t *)propvalue = node; return (ret); } return (OF_getprop(node, propname, propvalue, size)); } static struct resource * simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct simplebus_softc *sc; struct simplebus_devinfo *di; struct resource_list_entry *rle; int j; sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
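 * Memory addresses that fall inside one of the cached ranges entries are
 * then rewritten from the child (bus) address space into the host address
 * space before the request is forwarded. With a single illustrative range
 * { .bus = 0x0, .host = 0x30000000, .size = 0x100000 }, a request for
 * 0x1000-0x1fff would be forwarded as 0x30001000-0x30001fff.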
*/ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int simplebus_print_res(struct simplebus_devinfo *di) { int rv; if (di == NULL) return (0); rv = 0; rv += resource_list_print_type(&di->rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(&di->rl, "irq", SYS_RES_IRQ, "%jd"); return (rv); } static void simplebus_probe_nomatch(device_t bus, device_t child) { const char *name, *type, *compat; if (!bootverbose) return; compat = ofw_bus_get_compat(child); if (compat == NULL) return; name = ofw_bus_get_name(child); type = ofw_bus_get_type(child); device_printf(bus, "<%s>", name != NULL ? name : "unknown"); simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) printf(" disabled"); if (type) printf(" type %s", type); printf(" compat %s (no driver attached)\n", compat); } static int simplebus_print_child(device_t bus, device_t child) { int rv; rv = bus_print_child_header(bus, child); rv += simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) rv += printf(" disabled"); rv += bus_print_child_footer(bus, child); return (rv); } diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c index f8a37b9f37a9..15d57ed0f2ac 100644 --- a/sys/dev/gve/gve_main.c +++ b/sys/dev/gve/gve_main.c @@ -1,863 +1,868 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2023 Google LLC * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "gve.h" #include "gve_adminq.h" #define GVE_DRIVER_VERSION "GVE-FBSD-1.0.1\n" #define GVE_VERSION_MAJOR 1 #define GVE_VERSION_MINOR 0 #define GVE_VERSION_SUB 1 #define GVE_DEFAULT_RX_COPYBREAK 256 /* Devices supported by this driver. */ static struct gve_dev { uint16_t vendor_id; uint16_t device_id; const char *name; } gve_devs[] = { { PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" } }; struct sx gve_global_lock; static int gve_verify_driver_compatibility(struct gve_priv *priv) { int err; struct gve_driver_info *driver_info; struct gve_dma_handle driver_info_mem; err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info), PAGE_SIZE, &driver_info_mem); if (err != 0) return (ENOMEM); driver_info = driver_info_mem.cpu_addr; *driver_info = (struct gve_driver_info) { .os_type = 3, /* Freebsd */ .driver_major = GVE_VERSION_MAJOR, .driver_minor = GVE_VERSION_MINOR, .driver_sub = GVE_VERSION_SUB, .os_version_major = htobe32(FBSD_VERSION_MAJOR), .os_version_minor = htobe32(FBSD_VERSION_MINOR), .os_version_sub = htobe32(FBSD_VERSION_PATCH), .driver_capability_flags = { htobe64(GVE_DRIVER_CAPABILITY_FLAGS1), htobe64(GVE_DRIVER_CAPABILITY_FLAGS2), htobe64(GVE_DRIVER_CAPABILITY_FLAGS3), htobe64(GVE_DRIVER_CAPABILITY_FLAGS4), }, }; snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1), "FreeBSD %u", __FreeBSD_version); bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map, BUS_DMASYNC_PREREAD); err = gve_adminq_verify_driver_compatibility(priv, sizeof(struct gve_driver_info), driver_info_mem.bus_addr); /* It's ok if the device doesn't support this */ if (err == EOPNOTSUPP) err = 0; gve_dma_free_coherent(&driver_info_mem); return (err); } static int gve_up(struct gve_priv *priv) { if_t ifp = priv->ifp; int err; GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock); if (device_is_attached(priv->dev) == 0) { device_printf(priv->dev, "Cannot bring the iface up when detached\n"); return (ENXIO); } if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) return (0); if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); err = gve_register_qpls(priv); if (err != 0) goto reset; err = gve_create_rx_rings(priv); if (err != 0) goto reset; err = gve_create_tx_rings(priv); if (err != 0) goto reset; if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) { if_link_state_change(ifp, LINK_STATE_UP); gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP); } gve_unmask_all_queue_irqs(priv); gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP); priv->interface_up_cnt++; return (0); reset: gve_schedule_reset(priv); return (err); } static void gve_down(struct gve_priv *priv) { 
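	/*
	 * Teardown mirrors gve_up() in reverse: report link down and clear
	 * the running flag first, then destroy the RX/TX rings and
	 * unregister the QPLs, scheduling a reset if any of those steps
	 * fails.
	 */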
GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock); if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) return; if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) { if_link_state_change(priv->ifp, LINK_STATE_DOWN); gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP); } if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); if (gve_destroy_rx_rings(priv) != 0) goto reset; if (gve_destroy_tx_rings(priv) != 0) goto reset; if (gve_unregister_qpls(priv) != 0) goto reset; gve_mask_all_queue_irqs(priv); gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP); priv->interface_down_cnt++; return; reset: gve_schedule_reset(priv); } static int gve_set_mtu(if_t ifp, uint32_t new_mtu) { struct gve_priv *priv = if_getsoftc(ifp); int err; if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) { device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n", new_mtu, priv->max_mtu, ETHERMIN); return (EINVAL); } err = gve_adminq_set_mtu(priv, new_mtu); if (err == 0) { if (bootverbose) device_printf(priv->dev, "MTU set to %d\n", new_mtu); if_setmtu(ifp, new_mtu); } else { device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu); } return (err); } static void gve_init(void *arg) { struct gve_priv *priv = (struct gve_priv *)arg; if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) { GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); gve_up(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); } } static int gve_ioctl(if_t ifp, u_long command, caddr_t data) { struct gve_priv *priv; struct ifreq *ifr; int rc = 0; priv = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (command) { case SIOCSIFMTU: if (if_getmtu(ifp) == ifr->ifr_mtu) break; GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); gve_down(priv); gve_set_mtu(ifp, ifr->ifr_mtu); rc = gve_up(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); break; case SIOCSIFFLAGS: if ((if_getflags(ifp) & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); rc = gve_up(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); } } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); gve_down(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); } } break; case SIOCSIFCAP: if (ifr->ifr_reqcap == if_getcapenable(ifp)) break; GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); gve_down(priv); if_setcapenable(ifp, ifr->ifr_reqcap); rc = gve_up(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); break; case SIOCSIFMEDIA: /* FALLTHROUGH */ case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &priv->media, command); break; default: rc = ether_ioctl(ifp, command, data); break; } return (rc); } static int gve_media_change(if_t ifp) { struct gve_priv *priv = if_getsoftc(ifp); device_printf(priv->dev, "Media change not supported\n"); return (0); } static void gve_media_status(if_t ifp, struct ifmediareq *ifmr) { struct gve_priv *priv = if_getsoftc(ifp); GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_AUTO; } else { ifmr->ifm_active |= IFM_NONE; } GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); } static uint64_t gve_get_counter(if_t ifp, ift_counter cnt) { struct gve_priv *priv; uint64_t rpackets = 0; uint64_t tpackets = 0; uint64_t rbytes = 0; uint64_t tbytes = 0; uint64_t rx_dropped_pkt = 0; uint64_t tx_dropped_pkt = 0; priv = if_getsoftc(ifp); gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, 
&tpackets, &tbytes, &tx_dropped_pkt); switch (cnt) { case IFCOUNTER_IPACKETS: return (rpackets); case IFCOUNTER_OPACKETS: return (tpackets); case IFCOUNTER_IBYTES: return (rbytes); case IFCOUNTER_OBYTES: return (tbytes); case IFCOUNTER_IQDROPS: return (rx_dropped_pkt); case IFCOUNTER_OQDROPS: return (tx_dropped_pkt); default: return (if_get_counter_default(ifp, cnt)); } } static void gve_setup_ifnet(device_t dev, struct gve_priv *priv) { int caps = 0; if_t ifp; ifp = priv->ifp = if_alloc(IFT_ETHER); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, priv); if_setdev(ifp, dev); if_setinitfn(ifp, gve_init); if_setioctlfn(ifp, gve_ioctl); if_settransmitfn(ifp, gve_xmit_ifp); if_setqflushfn(ifp, gve_qflush); #if __FreeBSD_version >= 1400086 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); #else if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH); #endif ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status); if_setgetcounterfn(ifp, gve_get_counter); caps = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO | IFCAP_LRO; if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0) caps |= IFCAP_JUMBO_MTU; if_setcapabilities(ifp, caps); if_setcapenable(ifp, caps); if (bootverbose) device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu); if_setmtu(ifp, priv->max_mtu); ether_ifattach(ifp, priv->mac); ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); } static int gve_alloc_counter_array(struct gve_priv *priv) { int err; err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters, PAGE_SIZE, &priv->counter_array_mem); if (err != 0) return (err); priv->counters = priv->counter_array_mem.cpu_addr; return (0); } static void gve_free_counter_array(struct gve_priv *priv) { if (priv->counters != NULL) gve_dma_free_coherent(&priv->counter_array_mem); priv->counter_array_mem = (struct gve_dma_handle){}; } static int gve_alloc_irq_db_array(struct gve_priv *priv) { int err; err = gve_dma_alloc_coherent(priv, sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE, &priv->irqs_db_mem); if (err != 0) return (err); priv->irq_db_indices = priv->irqs_db_mem.cpu_addr; return (0); } static void gve_free_irq_db_array(struct gve_priv *priv) { if (priv->irq_db_indices != NULL) gve_dma_free_coherent(&priv->irqs_db_mem); priv->irqs_db_mem = (struct gve_dma_handle){}; } static void gve_free_rings(struct gve_priv *priv) { gve_free_irqs(priv); gve_free_tx_rings(priv); gve_free_rx_rings(priv); gve_free_qpls(priv); } static int gve_alloc_rings(struct gve_priv *priv) { int err; err = gve_alloc_qpls(priv); if (err != 0) goto abort; err = gve_alloc_rx_rings(priv); if (err != 0) goto abort; err = gve_alloc_tx_rings(priv); if (err != 0) goto abort; err = gve_alloc_irqs(priv); if (err != 0) goto abort; return (0); abort: gve_free_rings(priv); return (err); } static void gve_deconfigure_resources(struct gve_priv *priv) { int err; if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) { err = gve_adminq_deconfigure_device_resources(priv); if (err != 0) { device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n", err); return; } if (bootverbose) device_printf(priv->dev, "Deconfigured device resources\n"); gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK); } gve_free_irq_db_array(priv); gve_free_counter_array(priv); } static int gve_configure_resources(struct gve_priv *priv) { int err; if (gve_get_state_flag(priv, 
GVE_STATE_FLAG_RESOURCES_OK)) return (0); err = gve_alloc_counter_array(priv); if (err != 0) return (err); err = gve_alloc_irq_db_array(priv); if (err != 0) goto abort; err = gve_adminq_configure_device_resources(priv); if (err != 0) { device_printf(priv->dev, "Failed to configure device resources: err=%d\n", err); err = (ENXIO); goto abort; } gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK); if (bootverbose) device_printf(priv->dev, "Configured device resources\n"); return (0); abort: gve_deconfigure_resources(priv); return (err); } static void gve_set_queue_cnts(struct gve_priv *priv) { priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES); priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES); priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; if (priv->default_num_queues > 0) { priv->tx_cfg.num_queues = MIN(priv->default_num_queues, priv->tx_cfg.num_queues); priv->rx_cfg.num_queues = MIN(priv->default_num_queues, priv->rx_cfg.num_queues); } priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues; priv->mgmt_msix_idx = priv->num_queues; } static int gve_alloc_adminq_and_describe_device(struct gve_priv *priv) { int err; if ((err = gve_adminq_alloc(priv)) != 0) return (err); if ((err = gve_verify_driver_compatibility(priv)) != 0) { device_printf(priv->dev, "Failed to verify driver compatibility: err=%d\n", err); goto abort; } if ((err = gve_adminq_describe_device(priv)) != 0) goto abort; gve_set_queue_cnts(priv); priv->num_registered_pages = 0; return (0); abort: gve_release_adminq(priv); return (err); } void gve_schedule_reset(struct gve_priv *priv) { if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) return; device_printf(priv->dev, "Scheduling reset task!\n"); gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET); taskqueue_enqueue(priv->service_tq, &priv->service_task); } static void gve_destroy(struct gve_priv *priv) { gve_down(priv); gve_deconfigure_resources(priv); gve_release_adminq(priv); } static void gve_restore(struct gve_priv *priv) { int err; err = gve_adminq_alloc(priv); if (err != 0) goto abort; err = gve_configure_resources(priv); if (err != 0) goto abort; err = gve_up(priv); if (err != 0) goto abort; return; abort: device_printf(priv->dev, "Restore failed!\n"); return; } static void gve_handle_reset(struct gve_priv *priv) { if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET)) return; gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET); gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET); GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); if_link_state_change(priv->ifp, LINK_STATE_DOWN); gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP); /* * Releasing the adminq causes the NIC to destroy all resources * registered with it, so by clearing the flags beneath we cause * the subsequent gve_down call below to not attempt to tell the * NIC to destroy these resources again. * * The call to gve_down is needed in the first place to refresh * the state and the DMA-able memory within each driver ring. 
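 *
 * The resulting sequence is: gve_release_adminq(), clear the *_OK state
 * flags, gve_down() to quiesce the interface, then gve_restore() to
 * re-create the adminq, the device resources, and the queues.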
*/ gve_release_adminq(priv); gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK); gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK); gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK); gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK); gve_down(priv); gve_restore(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); priv->reset_cnt++; gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET); } static void gve_handle_link_status(struct gve_priv *priv) { uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS); bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS; if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) return; if (link_up) { if (bootverbose) device_printf(priv->dev, "Device link is up.\n"); if_link_state_change(priv->ifp, LINK_STATE_UP); gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP); } else { device_printf(priv->dev, "Device link is down.\n"); if_link_state_change(priv->ifp, LINK_STATE_DOWN); gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP); } } static void gve_service_task(void *arg, int pending) { struct gve_priv *priv = (struct gve_priv *)arg; uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS); if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) && !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) { device_printf(priv->dev, "Device requested reset\n"); gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET); } gve_handle_reset(priv); gve_handle_link_status(priv); } static int gve_probe(device_t dev) { uint16_t deviceid, vendorid; int i; vendorid = pci_get_vendor(dev); deviceid = pci_get_device(dev); for (i = 0; i < nitems(gve_devs); i++) { if (vendorid == gve_devs[i].vendor_id && deviceid == gve_devs[i].device_id) { device_set_desc(dev, gve_devs[i].name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void gve_free_sys_res_mem(struct gve_priv *priv) { if (priv->msix_table != NULL) bus_release_resource(priv->dev, SYS_RES_MEMORY, rman_get_rid(priv->msix_table), priv->msix_table); if (priv->db_bar != NULL) bus_release_resource(priv->dev, SYS_RES_MEMORY, rman_get_rid(priv->db_bar), priv->db_bar); if (priv->reg_bar != NULL) bus_release_resource(priv->dev, SYS_RES_MEMORY, rman_get_rid(priv->reg_bar), priv->reg_bar); } static int gve_attach(device_t dev) { struct gve_priv *priv; int rid; int err; priv = device_get_softc(dev); priv->dev = dev; GVE_IFACE_LOCK_INIT(priv->gve_iface_lock); pci_enable_busmaster(dev); rid = PCIR_BAR(GVE_REGISTER_BAR); priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (priv->reg_bar == NULL) { device_printf(dev, "Failed to allocate BAR0\n"); err = ENXIO; goto abort; } rid = PCIR_BAR(GVE_DOORBELL_BAR); priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (priv->db_bar == NULL) { device_printf(dev, "Failed to allocate BAR2\n"); err = ENXIO; goto abort; } rid = pci_msix_table_bar(priv->dev); priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (priv->msix_table == NULL) { device_printf(dev, "Failed to allocate msix table\n"); err = ENXIO; goto abort; } err = gve_alloc_adminq_and_describe_device(priv); if (err != 0) goto abort; err = gve_configure_resources(priv); if (err != 0) goto abort; err = gve_alloc_rings(priv); if (err != 0) goto abort; gve_setup_ifnet(dev, priv); priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION, sizeof(GVE_DRIVER_VERSION) - 1); TASK_INIT(&priv->service_task, 0, gve_service_task, priv); priv->service_tq = taskqueue_create("gve service", M_WAITOK 
| M_ZERO, taskqueue_thread_enqueue, &priv->service_tq); taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq", device_get_nameunit(priv->dev)); gve_setup_sysctl(priv); if (bootverbose) device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION); return (0); abort: gve_free_rings(priv); gve_deconfigure_resources(priv); gve_release_adminq(priv); gve_free_sys_res_mem(priv); GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock); return (err); } static int gve_detach(device_t dev) { struct gve_priv *priv = device_get_softc(dev); if_t ifp = priv->ifp; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); ether_ifdetach(ifp); GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock); gve_destroy(priv); GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock); gve_free_rings(priv); gve_free_sys_res_mem(priv); GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock); while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL)) taskqueue_drain(priv->service_tq, &priv->service_task); taskqueue_free(priv->service_tq); if_free(ifp); - return (bus_generic_detach(dev)); + return (0); } static device_method_t gve_methods[] = { DEVMETHOD(device_probe, gve_probe), DEVMETHOD(device_attach, gve_attach), DEVMETHOD(device_detach, gve_detach), DEVMETHOD_END }; static driver_t gve_driver = { "gve", gve_methods, sizeof(struct gve_priv) }; #if __FreeBSD_version < 1301503 static devclass_t gve_devclass; DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0); #else DRIVER_MODULE(gve, pci, gve_driver, 0, 0); #endif MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs, nitems(gve_devs)); diff --git a/sys/dev/iicbus/pmic/act8846.c b/sys/dev/iicbus/pmic/act8846.c index 5e166247f79b..e55fc70986a8 100644 --- a/sys/dev/iicbus/pmic/act8846.c +++ b/sys/dev/iicbus/pmic/act8846.c @@ -1,255 +1,260 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Michal Meloun * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * ACT8846 PMIC driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "regdev_if.h" static struct ofw_compat_data compat_data[] = { {"active-semi,act8846", 1}, {NULL, 0} }; #define LOCK(_sc) sx_xlock(&(_sc)->lock) #define UNLOCK(_sc) sx_xunlock(&(_sc)->lock) #define LOCK_INIT(_sc) sx_init(&(_sc)->lock, "act8846") #define LOCK_DESTROY(_sc) sx_destroy(&(_sc)->lock); #define ASSERT_LOCKED(_sc) sx_assert(&(_sc)->lock, SA_XLOCKED); #define ASSERT_UNLOCKED(_sc) sx_assert(&(_sc)->lock, SA_UNLOCKED); /* * Raw register access function. */ int act8846_read(struct act8846_softc *sc, uint8_t reg, uint8_t *val) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, 1, val}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer_excl(sc->dev, msgs, 2, IIC_INTRWAIT); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int act8846_read_buf(struct act8846_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t addr; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, &addr}, {0, IIC_M_RD, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; addr = reg; rv = iicbus_transfer_excl(sc->dev, msgs, 2, IIC_INTRWAIT); if (rv != 0) { device_printf(sc->dev, "Error when reading reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int act8846_write(struct act8846_softc *sc, uint8_t reg, uint8_t val) { uint8_t data[2]; int rv; struct iic_msg msgs[1] = { {0, IIC_M_WR, 2, data}, }; msgs[0].slave = sc->bus_addr; data[0] = reg; data[1] = val; rv = iicbus_transfer_excl(sc->dev, msgs, 1, IIC_INTRWAIT); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int act8846_write_buf(struct act8846_softc *sc, uint8_t reg, uint8_t *buf, size_t size) { uint8_t data[1]; int rv; struct iic_msg msgs[2] = { {0, IIC_M_WR, 1, data}, {0, IIC_M_WR | IIC_M_NOSTART, size, buf}, }; msgs[0].slave = sc->bus_addr; msgs[1].slave = sc->bus_addr; data[0] = reg; rv = iicbus_transfer_excl(sc->dev, msgs, 2, IIC_INTRWAIT); if (rv != 0) { device_printf(sc->dev, "Error when writing reg 0x%02X, rv: %d\n", reg, rv); return (EIO); } return (0); } int act8846_modify(struct act8846_softc *sc, uint8_t reg, uint8_t clear, uint8_t set) { uint8_t val; int rv; rv = act8846_read(sc, reg, &val); if (rv != 0) return (rv); val &= ~clear; val |= set; rv = act8846_write(sc, reg, val); if (rv != 0) return (rv); return (0); } static int act8846_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ACT8846 PMIC"); return (BUS_PROBE_DEFAULT); } static int act8846_attach(device_t dev) { struct act8846_softc *sc; int rv; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; sc->bus_addr = iicbus_get_addr(dev); node = ofw_bus_get_node(sc->dev); rv = 0; LOCK_INIT(sc); rv = act8846_regulator_attach(sc, node); if (rv != 0) goto fail; return (bus_generic_attach(dev)); fail: LOCK_DESTROY(sc); return (rv); } static int act8846_detach(device_t dev) { struct act8846_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); LOCK_DESTROY(sc); - return (bus_generic_detach(dev)); + return (0); } static device_method_t act8846_methods[] = { /* 
Device interface */ DEVMETHOD(device_probe, act8846_probe), DEVMETHOD(device_attach, act8846_attach), DEVMETHOD(device_detach, act8846_detach), /* Regdev interface */ DEVMETHOD(regdev_map, act8846_regulator_map), DEVMETHOD_END }; static DEFINE_CLASS_0(act8846_pmu, act8846_driver, act8846_methods, sizeof(struct act8846_softc)); EARLY_DRIVER_MODULE(act8846_pmic, iicbus, act8846_driver, NULL, NULL, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST); MODULE_VERSION(act8846_pmic, 1); MODULE_DEPEND(act8846_pmic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); IICBUS_FDT_PNP_INFO(compat_data); diff --git a/sys/dev/mana/gdma_main.c b/sys/dev/mana/gdma_main.c index 59ac6911c784..b339badad925 100644 --- a/sys/dev/mana/gdma_main.c +++ b/sys/dev/mana/gdma_main.c @@ -1,1919 +1,1924 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gdma_util.h" #include "mana.h" static mana_vendor_id_t mana_id_table[] = { { PCI_VENDOR_ID_MICROSOFT, PCI_DEV_ID_MANA_VF}, /* Last entry */ { 0, 0} }; static inline uint32_t mana_gd_r32(struct gdma_context *g, uint64_t offset) { uint32_t v = bus_space_read_4(g->gd_bus.bar0_t, g->gd_bus.bar0_h, offset); rmb(); return (v); } #if defined(__amd64__) static inline uint64_t mana_gd_r64(struct gdma_context *g, uint64_t offset) { uint64_t v = bus_space_read_8(g->gd_bus.bar0_t, g->gd_bus.bar0_h, offset); rmb(); return (v); } #else static inline uint64_t mana_gd_r64(struct gdma_context *g, uint64_t offset) { uint64_t v; uint32_t *vp = (uint32_t *)&v; *vp = mana_gd_r32(g, offset); *(vp + 1) = mana_gd_r32(g, offset + 4); rmb(); return (v); } #endif static int mana_gd_query_max_resources(device_t dev) { struct gdma_context *gc = device_get_softc(dev); struct gdma_query_max_resources_resp resp = {}; struct gdma_general_req req = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES, sizeof(req), sizeof(resp)); err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to query resource info: %d, 0x%x\n", err, resp.hdr.status); return err ? err : EPROTO; } mana_dbg(NULL, "max_msix %u, max_eq %u, max_cq %u, " "max_sq %u, max_rq %u\n", resp.max_msix, resp.max_eq, resp.max_cq, resp.max_sq, resp.max_rq); if (gc->num_msix_usable > resp.max_msix) gc->num_msix_usable = resp.max_msix; if (gc->num_msix_usable <= 1) return ENOSPC; gc->max_num_queues = mp_ncpus; if (gc->max_num_queues > MANA_MAX_NUM_QUEUES) gc->max_num_queues = MANA_MAX_NUM_QUEUES; if (gc->max_num_queues > resp.max_eq) gc->max_num_queues = resp.max_eq; if (gc->max_num_queues > resp.max_cq) gc->max_num_queues = resp.max_cq; if (gc->max_num_queues > resp.max_sq) gc->max_num_queues = resp.max_sq; if (gc->max_num_queues > resp.max_rq) gc->max_num_queues = resp.max_rq; return 0; } static int mana_gd_detect_devices(device_t dev) { struct gdma_context *gc = device_get_softc(dev); struct gdma_list_devices_resp resp = {}; struct gdma_general_req req = {}; struct gdma_dev_id gd_dev; uint32_t i, max_num_devs; uint16_t dev_type; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req), sizeof(resp)); err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to detect devices: %d, 0x%x\n", err, resp.hdr.status); return err ? err : EPROTO; } max_num_devs = min_t(uint32_t, MAX_NUM_GDMA_DEVICES, resp.num_of_devs); for (i = 0; i < max_num_devs; i++) { gd_dev = resp.devs[i]; dev_type = gd_dev.type; mana_dbg(NULL, "gdma dev %d, type %u\n", i, dev_type); /* HWC is already detected in mana_hwc_create_channel(). */ if (dev_type == GDMA_DEVICE_HWC) continue; if (dev_type == GDMA_DEVICE_MANA) { gc->mana.gdma_context = gc; gc->mana.dev_id = gd_dev; } } return gc->mana.dev_id.type == 0 ? 
ENODEV : 0; } int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len, const void *req, uint32_t resp_len, void *resp) { struct hw_channel_context *hwc = gc->hwc.driver_data; return mana_hwc_send_request(hwc, req_len, req, resp_len, resp); } void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = arg; if (error) return; KASSERT(nseg == 1, ("too many segments %d!", nseg)); *paddr = segs->ds_addr; } int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, struct gdma_mem_info *gmi) { bus_addr_t dma_handle; void *buf; int err; if (!gc || !gmi) return EINVAL; if (length < PAGE_SIZE || !powerof2(length)) return EINVAL; err = bus_dma_tag_create(bus_get_dma_tag(gc->dev), /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ length, /* maxsize */ 1, /* nsegments */ length, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg*/ &gmi->dma_tag); if (err) { device_printf(gc->dev, "failed to create dma tag, err: %d\n", err); return (err); } /* * Must have BUS_DMA_ZERO flag to clear the dma memory. * Otherwise the queue overflow detection mechanism does * not work. */ err = bus_dmamem_alloc(gmi->dma_tag, &buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &gmi->dma_map); if (err) { device_printf(gc->dev, "failed to alloc dma mem, err: %d\n", err); bus_dma_tag_destroy(gmi->dma_tag); return (err); } err = bus_dmamap_load(gmi->dma_tag, gmi->dma_map, buf, length, mana_gd_dma_map_paddr, &dma_handle, BUS_DMA_NOWAIT); if (err) { device_printf(gc->dev, "failed to load dma mem, err: %d\n", err); bus_dmamem_free(gmi->dma_tag, buf, gmi->dma_map); bus_dma_tag_destroy(gmi->dma_tag); return (err); } gmi->dev = gc->dev; gmi->dma_handle = dma_handle; gmi->virt_addr = buf; gmi->length = length; return 0; } void mana_gd_free_memory(struct gdma_mem_info *gmi) { bus_dmamap_unload(gmi->dma_tag, gmi->dma_map); bus_dmamem_free(gmi->dma_tag, gmi->virt_addr, gmi->dma_map); bus_dma_tag_destroy(gmi->dma_tag); } int mana_gd_destroy_doorbell_page(struct gdma_context *gc, int doorbell_page) { struct gdma_destroy_resource_range_req req = {}; struct gdma_resp_hdr resp = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE, sizeof(req), sizeof(resp)); req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; req.allocated_resources = doorbell_page; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.status) { device_printf(gc->dev, "Failed to destroy doorbell page: ret %d, 0x%x\n", err, resp.status); return err ? err : EPROTO; } return 0; } int mana_gd_allocate_doorbell_page(struct gdma_context *gc, int *doorbell_page) { struct gdma_allocate_resource_range_req req = {}; struct gdma_allocate_resource_range_resp resp = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE, sizeof(req), sizeof(resp)); req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; req.alignment = 1; /* Have GDMA start searching from 0 */ req.allocated_resources = 0; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to allocate doorbell page: ret %d, 0x%x\n", err, resp.hdr.status); return err ? 
err : EPROTO; } *doorbell_page = resp.allocated_resources; return 0; } static int mana_gd_create_hw_eq(struct gdma_context *gc, struct gdma_queue *queue) { struct gdma_create_queue_resp resp = {}; struct gdma_create_queue_req req = {}; int err; if (queue->type != GDMA_EQ) return EINVAL; mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE, sizeof(req), sizeof(resp)); req.hdr.dev_id = queue->gdma_dev->dev_id; req.type = queue->type; req.pdid = queue->gdma_dev->pdid; req.doolbell_id = queue->gdma_dev->doorbell; req.gdma_region = queue->mem_info.dma_region_handle; req.queue_size = queue->queue_size; req.log2_throttle_limit = queue->eq.log2_throttle_limit; req.eq_pci_msix_index = queue->eq.msix_index; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to create queue: %d, 0x%x\n", err, resp.hdr.status); return err ? err : EPROTO; } queue->id = resp.queue_index; queue->eq.disable_needed = true; queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; return 0; } static int mana_gd_disable_queue(struct gdma_queue *queue) { struct gdma_context *gc = queue->gdma_dev->gdma_context; struct gdma_disable_queue_req req = {}; struct gdma_general_resp resp = {}; int err; if (queue->type != GDMA_EQ) mana_warn(NULL, "Not event queue type 0x%x\n", queue->type); mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE, sizeof(req), sizeof(resp)); req.hdr.dev_id = queue->gdma_dev->dev_id; req.type = queue->type; req.queue_index = queue->id; req.alloc_res_id_on_creation = 1; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to disable queue: %d, 0x%x\n", err, resp.hdr.status); return err ? err : EPROTO; } return 0; } #define DOORBELL_OFFSET_SQ 0x0 #define DOORBELL_OFFSET_RQ 0x400 #define DOORBELL_OFFSET_CQ 0x800 #define DOORBELL_OFFSET_EQ 0xFF8 static void mana_gd_ring_doorbell(struct gdma_context *gc, uint32_t db_index, enum gdma_queue_type q_type, uint32_t qid, uint32_t tail_ptr, uint8_t num_req) { union gdma_doorbell_entry e = {}; void __iomem *addr; addr = (char *)gc->db_page_base + gc->db_page_size * db_index; switch (q_type) { case GDMA_EQ: e.eq.id = qid; e.eq.tail_ptr = tail_ptr; e.eq.arm = num_req; addr = (char *)addr + DOORBELL_OFFSET_EQ; break; case GDMA_CQ: e.cq.id = qid; e.cq.tail_ptr = tail_ptr; e.cq.arm = num_req; addr = (char *)addr + DOORBELL_OFFSET_CQ; break; case GDMA_RQ: e.rq.id = qid; e.rq.tail_ptr = tail_ptr; e.rq.wqe_cnt = num_req; addr = (char *)addr + DOORBELL_OFFSET_RQ; break; case GDMA_SQ: e.sq.id = qid; e.sq.tail_ptr = tail_ptr; addr = (char *)addr + DOORBELL_OFFSET_SQ; break; default: mana_warn(NULL, "Invalid queue type 0x%x\n", q_type); return; } /* Ensure all writes are done before ring doorbell */ wmb(); #if defined(__amd64__) writeq(addr, e.as_uint64); #else uint32_t *p = (uint32_t *)&e.as_uint64; writel(addr, *p); writel((char *)addr + 4, *(p + 1)); #endif } void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) { mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type, queue->id, queue->head * GDMA_WQE_BU_SIZE, 0); } void mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit) { struct gdma_context *gc = cq->gdma_dev->gdma_context; uint32_t num_cqe = cq->queue_size / GDMA_CQE_SIZE; uint32_t head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS); mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, head, arm_bit); } static void mana_gd_process_eqe(struct gdma_queue *eq) { 
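/* Dispatch a single EQE by type: completion events invoke the registered CQ's callback, the test event completes eq_test_event (checked by mana_gd_test_eq() to confirm interrupt delivery), and HWC init events are forwarded to the EQ-level callback. */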
uint32_t head = eq->head % (eq->queue_size / GDMA_EQE_SIZE); struct gdma_context *gc = eq->gdma_dev->gdma_context; struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr; union gdma_eqe_info eqe_info; enum gdma_eqe_type type; struct gdma_event event; struct gdma_queue *cq; struct gdma_eqe *eqe; uint32_t cq_id; eqe = &eq_eqe_ptr[head]; eqe_info.as_uint32 = eqe->eqe_info; type = eqe_info.type; switch (type) { case GDMA_EQE_COMPLETION: cq_id = eqe->details[0] & 0xFFFFFF; if (cq_id >= gc->max_num_cqs) { mana_warn(NULL, "failed: cq_id %u > max_num_cqs %u\n", cq_id, gc->max_num_cqs); break; } cq = gc->cq_table[cq_id]; if (!cq || cq->type != GDMA_CQ || cq->id != cq_id) { mana_warn(NULL, "failed: invalid cq_id %u\n", cq_id); break; } if (cq->cq.callback) cq->cq.callback(cq->cq.context, cq); break; case GDMA_EQE_TEST_EVENT: gc->test_event_eq_id = eq->id; mana_dbg(NULL, "EQE TEST EVENT received for EQ %u\n", eq->id); complete(&gc->eq_test_event); break; case GDMA_EQE_HWC_INIT_EQ_ID_DB: case GDMA_EQE_HWC_INIT_DATA: case GDMA_EQE_HWC_INIT_DONE: if (!eq->eq.callback) break; event.type = type; memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE); eq->eq.callback(eq->eq.context, eq, &event); break; default: break; } } static void mana_gd_process_eq_events(void *arg) { uint32_t owner_bits, new_bits, old_bits; union gdma_eqe_info eqe_info; struct gdma_eqe *eq_eqe_ptr; struct gdma_queue *eq = arg; struct gdma_context *gc; uint32_t head, num_eqe; struct gdma_eqe *eqe; int i, j; gc = eq->gdma_dev->gdma_context; num_eqe = eq->queue_size / GDMA_EQE_SIZE; eq_eqe_ptr = eq->queue_mem_ptr; bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map, BUS_DMASYNC_POSTREAD); /* Process up to 5 EQEs at a time, and update the HW head. */ for (i = 0; i < 5; i++) { eqe = &eq_eqe_ptr[eq->head % num_eqe]; eqe_info.as_uint32 = eqe->eqe_info; owner_bits = eqe_info.owner_bits; old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK; /* No more entries */ if (owner_bits == old_bits) break; new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK; if (owner_bits != new_bits) { /* Something wrong. 
Log for debugging purpose */ device_printf(gc->dev, "EQ %d: overflow detected, " "i = %d, eq->head = %u " "got owner_bits = %u, new_bits = %u " "eqe addr %p, eqe->eqe_info 0x%x, " "eqe type = %x, reserved1 = %x, client_id = %x, " "reserved2 = %x, owner_bits = %x\n", eq->id, i, eq->head, owner_bits, new_bits, eqe, eqe->eqe_info, eqe_info.type, eqe_info.reserved1, eqe_info.client_id, eqe_info.reserved2, eqe_info.owner_bits); uint32_t *eqe_dump = (uint32_t *) eq_eqe_ptr; for (j = 0; j < 20; j++) { device_printf(gc->dev, "%p: %x\t%x\t%x\t%x\n", &eqe_dump[j * 4], eqe_dump[j * 4], eqe_dump[j * 4 + 1], eqe_dump[j * 4 + 2], eqe_dump[j * 4 + 3]); } break; } rmb(); mana_gd_process_eqe(eq); eq->head++; } bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map, BUS_DMASYNC_PREREAD); head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, head, SET_ARM_BIT); } static int mana_gd_register_irq(struct gdma_queue *queue, const struct gdma_queue_spec *spec) { struct gdma_dev *gd = queue->gdma_dev; struct gdma_irq_context *gic; struct gdma_context *gc; struct gdma_resource *r; unsigned int msi_index; int err; gc = gd->gdma_context; r = &gc->msix_resource; mtx_lock_spin(&r->lock_spin); msi_index = find_first_zero_bit(r->map, r->size); if (msi_index >= r->size) { err = ENOSPC; } else { bitmap_set(r->map, msi_index, 1); queue->eq.msix_index = msi_index; err = 0; } mtx_unlock_spin(&r->lock_spin); if (err) return err; if (unlikely(msi_index >= gc->num_msix_usable)) { device_printf(gc->dev, "chose an invalid msix index %d, usable %d\n", msi_index, gc->num_msix_usable); return ENOSPC; } gic = &gc->irq_contexts[msi_index]; if (unlikely(gic->handler || gic->arg)) { device_printf(gc->dev, "interrupt handler or arg already assigned, " "msix index: %d\n", msi_index); } gic->arg = queue; gic->handler = mana_gd_process_eq_events; mana_dbg(NULL, "registered msix index %d vector %d irq %ju\n", msi_index, gic->msix_e.vector, rman_get_start(gic->res)); return 0; } static void mana_gd_deregiser_irq(struct gdma_queue *queue) { struct gdma_dev *gd = queue->gdma_dev; struct gdma_irq_context *gic; struct gdma_context *gc; struct gdma_resource *r; unsigned int msix_index; gc = gd->gdma_context; r = &gc->msix_resource; /* At most num_online_cpus() + 1 interrupts are used. 
*/ msix_index = queue->eq.msix_index; if (unlikely(msix_index >= gc->num_msix_usable)) return; gic = &gc->irq_contexts[msix_index]; gic->handler = NULL; gic->arg = NULL; mtx_lock_spin(&r->lock_spin); bitmap_clear(r->map, msix_index, 1); mtx_unlock_spin(&r->lock_spin); queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; mana_dbg(NULL, "deregistered msix index %d vector %d irq %ju\n", msix_index, gic->msix_e.vector, rman_get_start(gic->res)); } int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq) { struct gdma_generate_test_event_req req = {}; struct gdma_general_resp resp = {}; device_t dev = gc->dev; int err; sx_xlock(&gc->eq_test_event_sx); init_completion(&gc->eq_test_event); gc->test_event_eq_id = INVALID_QUEUE_ID; mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE, sizeof(req), sizeof(resp)); req.hdr.dev_id = eq->gdma_dev->dev_id; req.queue_index = eq->id; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err) { device_printf(dev, "test_eq failed: %d\n", err); goto out; } err = EPROTO; if (resp.hdr.status) { device_printf(dev, "test_eq failed: 0x%x\n", resp.hdr.status); goto out; } if (wait_for_completion_timeout(&gc->eq_test_event, 30 * hz)) { device_printf(dev, "test_eq timed out on queue %d\n", eq->id); goto out; } if (eq->id != gc->test_event_eq_id) { device_printf(dev, "test_eq got an event on wrong queue %d (%d)\n", gc->test_event_eq_id, eq->id); goto out; } err = 0; out: sx_xunlock(&gc->eq_test_event_sx); return err; } static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets, struct gdma_queue *queue) { int err; if (flush_evenets) { err = mana_gd_test_eq(gc, queue); if (err) device_printf(gc->dev, "Failed to flush EQ: %d\n", err); } mana_gd_deregiser_irq(queue); if (queue->eq.disable_needed) mana_gd_disable_queue(queue); } static int mana_gd_create_eq(struct gdma_dev *gd, const struct gdma_queue_spec *spec, bool create_hwq, struct gdma_queue *queue) { struct gdma_context *gc = gd->gdma_context; device_t dev = gc->dev; uint32_t log2_num_entries; int err; queue->eq.msix_index = INVALID_PCI_MSIX_INDEX; log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE); if (spec->eq.log2_throttle_limit > log2_num_entries) { device_printf(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n", spec->eq.log2_throttle_limit, log2_num_entries); return EINVAL; } err = mana_gd_register_irq(queue, spec); if (err) { device_printf(dev, "Failed to register irq: %d\n", err); return err; } queue->eq.callback = spec->eq.callback; queue->eq.context = spec->eq.context; queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1; if (create_hwq) { err = mana_gd_create_hw_eq(gc, queue); if (err) goto out; err = mana_gd_test_eq(gc, queue); if (err) goto out; } return 0; out: device_printf(dev, "Failed to create EQ: %d\n", err); mana_gd_destroy_eq(gc, false, queue); return err; } static void mana_gd_create_cq(const struct gdma_queue_spec *spec, struct gdma_queue *queue) { uint32_t log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE); queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries); queue->cq.parent = spec->cq.parent_eq; queue->cq.context = spec->cq.context; queue->cq.callback = spec->cq.callback; } static void mana_gd_destroy_cq(struct gdma_context *gc, struct gdma_queue *queue) { uint32_t id = queue->id; if (id >= gc->max_num_cqs) return; if (!gc->cq_table[id]) return; gc->cq_table[id] = NULL; } int mana_gd_create_hwc_queue(struct gdma_dev *gd, const struct gdma_queue_spec 
*spec, struct gdma_queue **queue_ptr) { struct gdma_context *gc = gd->gdma_context; struct gdma_mem_info *gmi; struct gdma_queue *queue; int err; queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO); gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); if (err) goto free_q; queue->head = 0; queue->tail = 0; queue->queue_mem_ptr = gmi->virt_addr; queue->queue_size = spec->queue_size; queue->monitor_avl_buf = spec->monitor_avl_buf; queue->type = spec->type; queue->gdma_dev = gd; if (spec->type == GDMA_EQ) err = mana_gd_create_eq(gd, spec, false, queue); else if (spec->type == GDMA_CQ) mana_gd_create_cq(spec, queue); if (err) goto out; *queue_ptr = queue; return 0; out: mana_gd_free_memory(gmi); free_q: free(queue, M_DEVBUF); return err; } int mana_gd_destroy_dma_region(struct gdma_context *gc, gdma_obj_handle_t dma_region_handle) { struct gdma_destroy_dma_region_req req = {}; struct gdma_general_resp resp = {}; int err; if (dma_region_handle == GDMA_INVALID_DMA_REGION) return 0; mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req), sizeof(resp)); req.dma_region_handle = dma_region_handle; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n", err, resp.hdr.status); return EPROTO; } return 0; } static int mana_gd_create_dma_region(struct gdma_dev *gd, struct gdma_mem_info *gmi) { unsigned int num_page = gmi->length / PAGE_SIZE; struct gdma_create_dma_region_req *req = NULL; struct gdma_create_dma_region_resp resp = {}; struct gdma_context *gc = gd->gdma_context; struct hw_channel_context *hwc; uint32_t length = gmi->length; uint32_t req_msg_size; int err; int i; if (length < PAGE_SIZE || !powerof2(length)) { mana_err(NULL, "gmi size incorrect: %u\n", length); return EINVAL; } if (offset_in_page((uintptr_t)gmi->virt_addr) != 0) { mana_err(NULL, "gmi not page aligned: %p\n", gmi->virt_addr); return EINVAL; } hwc = gc->hwc.driver_data; req_msg_size = sizeof(*req) + num_page * sizeof(uint64_t); if (req_msg_size > hwc->max_req_msg_size) { mana_err(NULL, "req msg size too large: %u, %u\n", req_msg_size, hwc->max_req_msg_size); return EINVAL; } req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO); mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, req_msg_size, sizeof(resp)); req->length = length; req->offset_in_page = 0; req->gdma_page_type = GDMA_PAGE_TYPE_4K; req->page_count = num_page; req->page_addr_list_len = num_page; for (i = 0; i < num_page; i++) req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); if (err) goto out; if (resp.hdr.status || resp.dma_region_handle == GDMA_INVALID_DMA_REGION) { device_printf(gc->dev, "Failed to create DMA region: 0x%x\n", resp.hdr.status); err = EPROTO; goto out; } gmi->dma_region_handle = resp.dma_region_handle; out: free(req, M_DEVBUF); return err; } int mana_gd_create_mana_eq(struct gdma_dev *gd, const struct gdma_queue_spec *spec, struct gdma_queue **queue_ptr) { struct gdma_context *gc = gd->gdma_context; struct gdma_mem_info *gmi; struct gdma_queue *queue; int err; if (spec->type != GDMA_EQ) return EINVAL; queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO); gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); if (err) goto free_q; err = mana_gd_create_dma_region(gd, gmi); if (err) goto out; queue->head = 0; queue->tail = 0; queue->queue_mem_ptr = gmi->virt_addr; 
queue->queue_size = spec->queue_size; queue->monitor_avl_buf = spec->monitor_avl_buf; queue->type = spec->type; queue->gdma_dev = gd; err = mana_gd_create_eq(gd, spec, true, queue); if (err) goto out; *queue_ptr = queue; return 0; out: mana_gd_free_memory(gmi); free_q: free(queue, M_DEVBUF); return err; } int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, const struct gdma_queue_spec *spec, struct gdma_queue **queue_ptr) { struct gdma_context *gc = gd->gdma_context; struct gdma_mem_info *gmi; struct gdma_queue *queue; int err; if (spec->type != GDMA_CQ && spec->type != GDMA_SQ && spec->type != GDMA_RQ) return EINVAL; queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO); gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); if (err) goto free_q; err = mana_gd_create_dma_region(gd, gmi); if (err) goto out; queue->head = 0; queue->tail = 0; queue->queue_mem_ptr = gmi->virt_addr; queue->queue_size = spec->queue_size; queue->monitor_avl_buf = spec->monitor_avl_buf; queue->type = spec->type; queue->gdma_dev = gd; if (spec->type == GDMA_CQ) mana_gd_create_cq(spec, queue); *queue_ptr = queue; return 0; out: mana_gd_free_memory(gmi); free_q: free(queue, M_DEVBUF); return err; } void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue) { struct gdma_mem_info *gmi = &queue->mem_info; switch (queue->type) { case GDMA_EQ: mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue); break; case GDMA_CQ: mana_gd_destroy_cq(gc, queue); break; case GDMA_RQ: break; case GDMA_SQ: break; default: device_printf(gc->dev, "Can't destroy unknown queue: type = %d\n", queue->type); return; } mana_gd_destroy_dma_region(gc, gmi->dma_region_handle); mana_gd_free_memory(gmi); free(queue, M_DEVBUF); } #define OS_MAJOR_DIV 100000 #define OS_BUILD_MOD 1000 int mana_gd_verify_vf_version(device_t dev) { struct gdma_context *gc = device_get_softc(dev); struct gdma_verify_ver_resp resp = {}; struct gdma_verify_ver_req req = {}; int err; mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION, sizeof(req), sizeof(resp)); req.protocol_ver_min = GDMA_PROTOCOL_FIRST; req.protocol_ver_max = GDMA_PROTOCOL_LAST; req.drv_ver = 0; /* Unused */ req.os_type = 0x30; /* Other */ req.os_ver_major = osreldate / OS_MAJOR_DIV; req.os_ver_minor = (osreldate % OS_MAJOR_DIV) / OS_BUILD_MOD; req.os_ver_build = osreldate % OS_BUILD_MOD; strncpy(req.os_ver_str1, ostype, sizeof(req.os_ver_str1) - 1); strncpy(req.os_ver_str2, osrelease, sizeof(req.os_ver_str2) - 1); err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", err, resp.hdr.status); return err ? err : EPROTO; } return 0; } int mana_gd_register_device(struct gdma_dev *gd) { struct gdma_context *gc = gd->gdma_context; struct gdma_register_device_resp resp = {}; struct gdma_general_req req = {}; int err; gd->pdid = INVALID_PDID; gd->doorbell = INVALID_DOORBELL; gd->gpa_mkey = INVALID_MEM_KEY; mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req), sizeof(resp)); req.hdr.dev_id = gd->dev_id; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n", err, resp.hdr.status); return err ? 
err : EPROTO; } gd->pdid = resp.pdid; gd->gpa_mkey = resp.gpa_mkey; gd->doorbell = resp.db_id; mana_dbg(NULL, "mana device pdid %u, gpa_mkey %u, doorbell %u\n", gd->pdid, gd->gpa_mkey, gd->doorbell); return 0; } int mana_gd_deregister_device(struct gdma_dev *gd) { struct gdma_context *gc = gd->gdma_context; struct gdma_general_resp resp = {}; struct gdma_general_req req = {}; int err; if (gd->pdid == INVALID_PDID) return EINVAL; mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req), sizeof(resp)); req.hdr.dev_id = gd->dev_id; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { device_printf(gc->dev, "Failed to deregister device: %d, 0x%x\n", err, resp.hdr.status); if (!err) err = EPROTO; } gd->pdid = INVALID_PDID; gd->doorbell = INVALID_DOORBELL; gd->gpa_mkey = INVALID_MEM_KEY; return err; } uint32_t mana_gd_wq_avail_space(struct gdma_queue *wq) { uint32_t used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE; uint32_t wq_size = wq->queue_size; if (used_space > wq_size) { mana_warn(NULL, "failed: used space %u > queue size %u\n", used_space, wq_size); } return wq_size - used_space; } uint8_t * mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset) { uint32_t offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1); if ((offset + GDMA_WQE_BU_SIZE) > wq->queue_size) { mana_warn(NULL, "failed: write end out of queue bound %u, " "queue size %u\n", offset + GDMA_WQE_BU_SIZE, wq->queue_size); } return (uint8_t *)wq->queue_mem_ptr + offset; } static uint32_t mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, enum gdma_queue_type q_type, uint32_t client_oob_size, uint32_t sgl_data_size, uint8_t *wqe_ptr) { bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL); bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0); struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr; uint8_t *ptr; memset(header, 0, sizeof(struct gdma_wqe)); header->num_sge = wqe_req->num_sge; header->inline_oob_size_div4 = client_oob_size / sizeof(uint32_t); if (oob_in_sgl) { if (!pad_data || wqe_req->num_sge < 2) { mana_warn(NULL, "no pad_data or num_sge < 2\n"); } header->client_oob_in_sgl = 1; if (pad_data) header->last_vbytes = wqe_req->sgl[0].size; } if (q_type == GDMA_SQ) header->client_data_unit = wqe_req->client_data_unit; /* * The size of gdma_wqe + client_oob_size must be less than or equal * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond * the queue memory buffer boundary.
*/ ptr = wqe_ptr + sizeof(header); if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) { memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size); if (client_oob_size > wqe_req->inline_oob_size) memset(ptr + wqe_req->inline_oob_size, 0, client_oob_size - wqe_req->inline_oob_size); } return sizeof(header) + client_oob_size; } static void mana_gd_write_sgl(struct gdma_queue *wq, uint8_t *wqe_ptr, const struct gdma_wqe_request *wqe_req) { uint32_t sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge; const uint8_t *address = (uint8_t *)wqe_req->sgl; uint8_t *base_ptr, *end_ptr; uint32_t size_to_end; base_ptr = wq->queue_mem_ptr; end_ptr = base_ptr + wq->queue_size; size_to_end = (uint32_t)(end_ptr - wqe_ptr); if (size_to_end < sgl_size) { memcpy(wqe_ptr, address, size_to_end); wqe_ptr = base_ptr; address += size_to_end; sgl_size -= size_to_end; } memcpy(wqe_ptr, address, sgl_size); } int mana_gd_post_work_request(struct gdma_queue *wq, const struct gdma_wqe_request *wqe_req, struct gdma_posted_wqe_info *wqe_info) { uint32_t client_oob_size = wqe_req->inline_oob_size; struct gdma_context *gc; uint32_t sgl_data_size; uint32_t max_wqe_size; uint32_t wqe_size; uint8_t *wqe_ptr; if (wqe_req->num_sge == 0) return EINVAL; if (wq->type == GDMA_RQ) { if (client_oob_size != 0) return EINVAL; client_oob_size = INLINE_OOB_SMALL_SIZE; max_wqe_size = GDMA_MAX_RQE_SIZE; } else { if (client_oob_size != INLINE_OOB_SMALL_SIZE && client_oob_size != INLINE_OOB_LARGE_SIZE) return EINVAL; max_wqe_size = GDMA_MAX_SQE_SIZE; } sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge; wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size + sgl_data_size, GDMA_WQE_BU_SIZE); if (wqe_size > max_wqe_size) return EINVAL; if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) { gc = wq->gdma_dev->gdma_context; device_printf(gc->dev, "unsuccessful flow control!\n"); return ENOSPC; } if (wqe_info) wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE; wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head); wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size, sgl_data_size, wqe_ptr); if (wqe_ptr >= (uint8_t *)wq->queue_mem_ptr + wq->queue_size) wqe_ptr -= wq->queue_size; mana_gd_write_sgl(wq, wqe_ptr, wqe_req); wq->head += wqe_size / GDMA_WQE_BU_SIZE; bus_dmamap_sync(wq->mem_info.dma_tag, wq->mem_info.dma_map, BUS_DMASYNC_PREWRITE); return 0; } int mana_gd_post_and_ring(struct gdma_queue *queue, const struct gdma_wqe_request *wqe_req, struct gdma_posted_wqe_info *wqe_info) { struct gdma_context *gc = queue->gdma_dev->gdma_context; int err; err = mana_gd_post_work_request(queue, wqe_req, wqe_info); if (err) return err; mana_gd_wq_ring_doorbell(gc, queue); return 0; } static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) { unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe); struct gdma_cqe *cq_cqe = cq->queue_mem_ptr; uint32_t owner_bits, new_bits, old_bits; struct gdma_cqe *cqe; cqe = &cq_cqe[cq->head % num_cqe]; owner_bits = cqe->cqe_info.owner_bits; old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK; /* Return 0 if no more entries. */ if (owner_bits == old_bits) return 0; new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK; /* Return -1 if overflow detected. */ if (owner_bits != new_bits) { mana_warn(NULL, "overflow detected! 
owner_bits %u != new_bits %u\n", owner_bits, new_bits); return -1; } rmb(); comp->wq_num = cqe->cqe_info.wq_num; comp->is_sq = cqe->cqe_info.is_sq; memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE); return 1; } int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe) { int cqe_idx; int ret; bus_dmamap_sync(cq->mem_info.dma_tag, cq->mem_info.dma_map, BUS_DMASYNC_POSTREAD); for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) { ret = mana_gd_read_cqe(cq, &comp[cqe_idx]); if (ret < 0) { cq->head -= cqe_idx; return ret; } if (ret == 0) break; cq->head++; } return cqe_idx; } static void mana_gd_intr(void *arg) { struct gdma_irq_context *gic = arg; if (gic->handler) { gic->handler(gic->arg); } } int mana_gd_alloc_res_map(uint32_t res_avail, struct gdma_resource *r, const char *lock_name) { int n = howmany(res_avail, BITS_PER_LONG); r->map = malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO); r->size = res_avail; mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN); mana_dbg(NULL, "total res %u, total number of unsigned longs %u\n", r->size, n); return (0); } void mana_gd_free_res_map(struct gdma_resource *r) { if (!r || !r->map) return; free(r->map, M_DEVBUF); r->map = NULL; r->size = 0; } static void mana_gd_init_registers(struct gdma_context *gc) { uintptr_t bar0_va = rman_get_bushandle(gc->bar0); vm_paddr_t bar0_pa = rman_get_start(gc->bar0); gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF; gc->db_page_base = (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET)); gc->phys_db_page_base = bar0_pa + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); gc->shm_base = (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_SHM_OFFSET)); mana_dbg(NULL, "db_page_size 0x%x, db_page_base %p," " shm_base %p\n", gc->db_page_size, gc->db_page_base, gc->shm_base); } static struct resource * mana_gd_alloc_bar(device_t dev, int bar) { struct resource *res = NULL; struct pci_map *pm; int rid, type; if (bar < 0 || bar > PCIR_MAX_BAR_0) goto alloc_bar_out; pm = pci_find_bar(dev, PCIR_BAR(bar)); if (!pm) goto alloc_bar_out; if (PCI_BAR_IO(pm->pm_value)) type = SYS_RES_IOPORT; else type = SYS_RES_MEMORY; if (type < 0) goto alloc_bar_out; rid = PCIR_BAR(bar); res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE); #if defined(__amd64__) if (res) mana_dbg(NULL, "bar %d: rid 0x%x, type 0x%jx," " handle 0x%jx\n", bar, rid, res->r_bustag, res->r_bushandle); #endif alloc_bar_out: return (res); } static void mana_gd_free_pci_res(struct gdma_context *gc) { if (!gc || !gc->dev) return; if (gc->bar0 != NULL) { bus_release_resource(gc->dev, SYS_RES_MEMORY, PCIR_BAR(GDMA_BAR0), gc->bar0); } if (gc->msix != NULL) { bus_release_resource(gc->dev, SYS_RES_MEMORY, gc->msix_rid, gc->msix); } } static int mana_gd_setup_irqs(device_t dev) { unsigned int max_queues_per_port = mp_ncpus; struct gdma_context *gc = device_get_softc(dev); struct gdma_irq_context *gic; unsigned int max_irqs; int nvec; int rc, rcc, i; if (max_queues_per_port > MANA_MAX_NUM_QUEUES) max_queues_per_port = MANA_MAX_NUM_QUEUES; /* Need 1 interrupt for the Hardware Communication Channel (HWC) */ max_irqs = max_queues_per_port + 1; nvec = max_irqs; rc = pci_alloc_msix(dev, &nvec); if (unlikely(rc != 0)) { device_printf(dev, "Failed to allocate MSIX, vectors %d, error: %d\n", nvec, rc); rc = ENOSPC; goto err_setup_irq_alloc; } if (nvec != max_irqs) { if (nvec == 1) { device_printf(dev, "Not enough MSI-X vectors allocated: %d\n", nvec); rc = ENOSPC; goto err_setup_irq_release; } device_printf(dev,
"Allocated only %d MSI-x (%d requested)\n", nvec, max_irqs); } gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context), M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < nvec; i++) { gic = &gc->irq_contexts[i]; gic->msix_e.entry = i; /* Vector starts from 1. */ gic->msix_e.vector = i + 1; gic->handler = NULL; gic->arg = NULL; gic->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &gic->msix_e.vector, RF_ACTIVE | RF_SHAREABLE); if (unlikely(gic->res == NULL)) { rc = ENOMEM; device_printf(dev, "could not allocate resource " "for irq vector %d\n", gic->msix_e.vector); goto err_setup_irq; } rc = bus_setup_intr(dev, gic->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, mana_gd_intr, gic, &gic->cookie); if (unlikely(rc != 0)) { device_printf(dev, "failed to register interrupt " "handler for irq %ju vector %d: error %d\n", rman_get_start(gic->res), gic->msix_e.vector, rc); goto err_setup_irq; } gic->requested = true; mana_dbg(NULL, "added msix vector %d irq %ju\n", gic->msix_e.vector, rman_get_start(gic->res)); } rc = mana_gd_alloc_res_map(nvec, &gc->msix_resource, "gdma msix res lock"); if (rc != 0) { device_printf(dev, "failed to allocate memory " "for msix bitmap\n"); goto err_setup_irq; } gc->max_num_msix = nvec; gc->num_msix_usable = nvec; mana_dbg(NULL, "setup %d msix interrupts\n", nvec); return (0); err_setup_irq: for (; i >= 0; i--) { gic = &gc->irq_contexts[i]; rcc = 0; /* * If gic->requested is true, we need to free both intr and * resources. */ if (gic->requested) rcc = bus_teardown_intr(dev, gic->res, gic->cookie); if (unlikely(rcc != 0)) device_printf(dev, "could not release " "irq vector %d, error: %d\n", gic->msix_e.vector, rcc); rcc = 0; if (gic->res != NULL) { rcc = bus_release_resource(dev, SYS_RES_IRQ, gic->msix_e.vector, gic->res); } if (unlikely(rcc != 0)) device_printf(dev, "dev has no parent while " "releasing resource for irq vector %d\n", gic->msix_e.vector); gic->requested = false; gic->res = NULL; } free(gc->irq_contexts, M_DEVBUF); gc->irq_contexts = NULL; err_setup_irq_release: pci_release_msi(dev); err_setup_irq_alloc: return (rc); } static void mana_gd_remove_irqs(device_t dev) { struct gdma_context *gc = device_get_softc(dev); struct gdma_irq_context *gic; int rc, i; mana_gd_free_res_map(&gc->msix_resource); for (i = 0; i < gc->max_num_msix; i++) { gic = &gc->irq_contexts[i]; if (gic->requested) { rc = bus_teardown_intr(dev, gic->res, gic->cookie); if (unlikely(rc != 0)) { device_printf(dev, "failed to tear down " "irq vector %d, error: %d\n", gic->msix_e.vector, rc); } gic->requested = false; } if (gic->res != NULL) { rc = bus_release_resource(dev, SYS_RES_IRQ, gic->msix_e.vector, gic->res); if (unlikely(rc != 0)) { device_printf(dev, "dev has no parent while " "releasing resource for irq vector %d\n", gic->msix_e.vector); } gic->res = NULL; } } gc->max_num_msix = 0; gc->num_msix_usable = 0; free(gc->irq_contexts, M_DEVBUF); gc->irq_contexts = NULL; pci_release_msi(dev); } static int mana_gd_probe(device_t dev) { mana_vendor_id_t *ent; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); ent = mana_id_table; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id)) { mana_dbg(NULL, "vendor=%x device=%x\n", pci_vendor_id, pci_device_id); device_set_desc(dev, DEVICE_DESC); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /** * mana_attach - Device Initialization Routine * @dev: device information struct * * Returns 0 on success, otherwise on 
failure. * * mana_attach initializes a GDMA adapter identified by a device structure. **/ static int mana_gd_attach(device_t dev) { struct gdma_context *gc; int msix_rid; int rc; gc = device_get_softc(dev); gc->dev = dev; pci_enable_io(dev, SYS_RES_IOPORT); pci_enable_io(dev, SYS_RES_MEMORY); pci_enable_busmaster(dev); gc->bar0 = mana_gd_alloc_bar(dev, GDMA_BAR0); if (unlikely(gc->bar0 == NULL)) { device_printf(dev, "unable to allocate bus resource for bar0!\n"); rc = ENOMEM; goto err_disable_dev; } /* Store the bar0 tag and handle for quick access */ gc->gd_bus.bar0_t = rman_get_bustag(gc->bar0); gc->gd_bus.bar0_h = rman_get_bushandle(gc->bar0); /* Map MSI-X vector table */ msix_rid = pci_msix_table_bar(dev); mana_dbg(NULL, "msix_rid 0x%x\n", msix_rid); gc->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &msix_rid, RF_ACTIVE); if (unlikely(gc->msix == NULL)) { device_printf(dev, "unable to allocate bus resource for msix!\n"); rc = ENOMEM; goto err_free_pci_res; } gc->msix_rid = msix_rid; if (unlikely(gc->gd_bus.bar0_h == 0)) { device_printf(dev, "failed to map bar0!\n"); rc = ENXIO; goto err_free_pci_res; } mana_gd_init_registers(gc); mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); rc = mana_gd_setup_irqs(dev); if (rc) { goto err_free_pci_res; } sx_init(&gc->eq_test_event_sx, "gdma test event sx"); rc = mana_hwc_create_channel(gc); if (rc) { mana_dbg(NULL, "Failed to create hwc channel\n"); if (rc == EIO) goto err_clean_up_gdma; else goto err_remove_irq; } rc = mana_gd_verify_vf_version(dev); if (rc) { mana_dbg(NULL, "Failed to verify vf\n"); goto err_clean_up_gdma; } rc = mana_gd_query_max_resources(dev); if (rc) { mana_dbg(NULL, "Failed to query max resources\n"); goto err_clean_up_gdma; } rc = mana_gd_detect_devices(dev); if (rc) { mana_dbg(NULL, "Failed to detect mana device\n"); goto err_clean_up_gdma; } rc = mana_probe(&gc->mana); if (rc) { mana_dbg(NULL, "Failed to probe mana device\n"); goto err_clean_up_gdma; } return (0); err_clean_up_gdma: mana_hwc_destroy_channel(gc); err_remove_irq: mana_gd_remove_irqs(dev); err_free_pci_res: mana_gd_free_pci_res(gc); err_disable_dev: pci_disable_busmaster(dev); return (rc); } /** * mana_detach - Device Removal Routine * @pdev: device information struct * * mana_detach is called by the device subsystem to alert the driver * that it should release a PCI device.
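* Children are detached first (via bus_generic_detach) so they no longer use the hardware channel, interrupts, or PCI resources that are torn down afterwards.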
**/ static int mana_gd_detach(device_t dev) { struct gdma_context *gc = device_get_softc(dev); + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); mana_remove(&gc->mana); mana_hwc_destroy_channel(gc); mana_gd_remove_irqs(dev); mana_gd_free_pci_res(gc); pci_disable_busmaster(dev); - return (bus_generic_detach(dev)); + return (0); } /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t mana_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mana_gd_probe), DEVMETHOD(device_attach, mana_gd_attach), DEVMETHOD(device_detach, mana_gd_detach), DEVMETHOD_END }; static driver_t mana_driver = { "mana", mana_methods, sizeof(struct gdma_context), }; DRIVER_MODULE(mana, pci, mana_driver, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, mana, mana_id_table, nitems(mana_id_table) - 1); MODULE_DEPEND(mana, pci, 1, 1, 1); MODULE_DEPEND(mana, ether, 1, 1, 1); /*********************************************************************/ diff --git a/sys/powerpc/mpc85xx/pci_mpc85xx.c b/sys/powerpc/mpc85xx/pci_mpc85xx.c index f4080ce3b75c..8e349df03a51 100644 --- a/sys/powerpc/mpc85xx/pci_mpc85xx.c +++ b/sys/powerpc/mpc85xx/pci_mpc85xx.c @@ -1,952 +1,957 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2006-2007 by Juniper Networks. * Copyright 2008 Semihalf. * Copyright 2010 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * From: FreeBSD: src/sys/powerpc/mpc85xx/pci_ocp.c,v 1.9 2010/03/23 23:46:28 marcel */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include "pic_if.h" #include #include #include #include #define REG_CFG_ADDR 0x0000 #define CONFIG_ACCESS_ENABLE 0x80000000 #define REG_CFG_DATA 0x0004 #define REG_INT_ACK 0x0008 #define REG_PEX_IP_BLK_REV1 0x0bf8 #define IP_MJ_M 0x0000ff00 #define IP_MJ_S 8 #define IP_MN_M 0x000000ff #define IP_MN_S 0 #define REG_POTAR(n) (0x0c00 + 0x20 * (n)) #define REG_POTEAR(n) (0x0c04 + 0x20 * (n)) #define REG_POWBAR(n) (0x0c08 + 0x20 * (n)) #define REG_POWAR(n) (0x0c10 + 0x20 * (n)) #define REG_PITAR(n) (0x0e00 - 0x20 * (n)) #define REG_PIWBAR(n) (0x0e08 - 0x20 * (n)) #define REG_PIWBEAR(n) (0x0e0c - 0x20 * (n)) #define REG_PIWAR(n) (0x0e10 - 0x20 * (n)) #define PIWAR_EN 0x80000000 #define PIWAR_PF 0x40000000 #define PIWAR_TRGT_M 0x00f00000 #define PIWAR_TRGT_S 20 #define PIWAR_TRGT_CCSR 0xe #define PIWAR_TRGT_LOCAL 0xf #define REG_PEX_MES_DR 0x0020 #define REG_PEX_MES_IER 0x0028 #define REG_PEX_ERR_DR 0x0e00 #define REG_PEX_ERR_EN 0x0e08 #define REG_PEX_ERR_DR 0x0e00 #define REG_PEX_ERR_DR_ME 0x80000000 #define REG_PEX_ERR_DR_PCT 0x800000 #define REG_PEX_ERR_DR_PAT 0x400000 #define REG_PEX_ERR_DR_PCAC 0x200000 #define REG_PEX_ERR_DR_PNM 0x100000 #define REG_PEX_ERR_DR_CDNSC 0x80000 #define REG_PEX_ERR_DR_CRSNC 0x40000 #define REG_PEX_ERR_DR_ICCA 0x20000 #define REG_PEX_ERR_DR_IACA 0x10000 #define REG_PEX_ERR_DR_CRST 0x8000 #define REG_PEX_ERR_DR_MIS 0x4000 #define REG_PEX_ERR_DR_IOIS 0x2000 #define REG_PEX_ERR_DR_CIS 0x1000 #define REG_PEX_ERR_DR_CIEP 0x800 #define REG_PEX_ERR_DR_IOIEP 0x400 #define REG_PEX_ERR_DR_OAC 0x200 #define REG_PEX_ERR_DR_IOIA 0x100 #define REG_PEX_ERR_DR_IMBA 0x80 #define REG_PEX_ERR_DR_IIOBA 0x40 #define REG_PEX_ERR_DR_LDDE 0x20 #define REG_PEX_ERR_EN 0x0e08 #define PCIR_LTSSM 0x404 #define LTSSM_STAT_L0 0x16 #define DEVFN(b, s, f) ((b << 16) | (s << 8) | f) #define FSL_NUM_MSIS 256 /* 8 registers of 32 bits (8 hardware IRQs) */ #define PCI_SLOT_FIRST 0x1 /* used to be 0x11 but qemu-ppce500 starts from 0x1 */ struct fsl_pcib_softc { struct ofw_pci_softc pci_sc; device_t sc_dev; struct mtx sc_cfg_mtx; int sc_ip_maj; int sc_ip_min; int sc_iomem_target; bus_addr_t sc_iomem_start, sc_iomem_end; int sc_ioport_target; bus_addr_t sc_ioport_start, sc_ioport_end; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct resource *sc_irq_res; void *sc_ih; int sc_busnr; int sc_pcie; uint8_t sc_pcie_capreg; /* PCI-E Capability Reg Set */ }; struct fsl_pcib_err_dr { const char *msg; uint32_t err_dr_mask; }; struct fsl_msi_map { SLIST_ENTRY(fsl_msi_map) slist; uint32_t irq_base; bus_addr_t target; }; SLIST_HEAD(msi_head, fsl_msi_map) fsl_msis = SLIST_HEAD_INITIALIZER(msi_head); static const struct fsl_pcib_err_dr pci_err[] = { {"ME", REG_PEX_ERR_DR_ME}, {"PCT", REG_PEX_ERR_DR_PCT}, {"PAT", REG_PEX_ERR_DR_PAT}, {"PCAC", REG_PEX_ERR_DR_PCAC}, {"PNM", REG_PEX_ERR_DR_PNM}, {"CDNSC", REG_PEX_ERR_DR_CDNSC}, {"CRSNC", REG_PEX_ERR_DR_CRSNC}, {"ICCA", REG_PEX_ERR_DR_ICCA}, {"IACA", REG_PEX_ERR_DR_IACA}, {"CRST", REG_PEX_ERR_DR_CRST}, {"MIS", REG_PEX_ERR_DR_MIS}, {"IOIS", REG_PEX_ERR_DR_IOIS}, {"CIS", REG_PEX_ERR_DR_CIS}, {"CIEP", REG_PEX_ERR_DR_CIEP}, {"IOIEP", REG_PEX_ERR_DR_IOIEP}, {"OAC", 
REG_PEX_ERR_DR_OAC}, {"IOIA", REG_PEX_ERR_DR_IOIA}, {"IMBA", REG_PEX_ERR_DR_IMBA}, {"IIOBA", REG_PEX_ERR_DR_IIOBA}, {"LDDE", REG_PEX_ERR_DR_LDDE} }; /* Local forward declerations. */ static uint32_t fsl_pcib_cfgread(struct fsl_pcib_softc *, u_int, u_int, u_int, u_int, int); static void fsl_pcib_cfgwrite(struct fsl_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int fsl_pcib_decode_win(phandle_t, struct fsl_pcib_softc *); static void fsl_pcib_err_init(device_t); static void fsl_pcib_inbound(struct fsl_pcib_softc *, int, int, uint64_t, uint64_t, uint64_t); static void fsl_pcib_outbound(struct fsl_pcib_softc *, int, int, uint64_t, uint64_t, uint64_t); /* Forward declerations. */ static int fsl_pcib_attach(device_t); static int fsl_pcib_detach(device_t); static int fsl_pcib_probe(device_t); static int fsl_pcib_maxslots(device_t); static uint32_t fsl_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void fsl_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int fsl_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount, int *irqs); static int fsl_pcib_release_msi(device_t dev, device_t child, int count, int *irqs); static int fsl_pcib_alloc_msix(device_t dev, device_t child, int *irq); static int fsl_pcib_release_msix(device_t dev, device_t child, int irq); static int fsl_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data); static vmem_t *msi_vmem; /* Global MSI vmem, holds all MSI ranges. */ /* * Bus interface definitions. */ static device_method_t fsl_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fsl_pcib_probe), DEVMETHOD(device_attach, fsl_pcib_attach), DEVMETHOD(device_detach, fsl_pcib_detach), /* pcib interface */ DEVMETHOD(pcib_maxslots, fsl_pcib_maxslots), DEVMETHOD(pcib_read_config, fsl_pcib_read_config), DEVMETHOD(pcib_write_config, fsl_pcib_write_config), DEVMETHOD(pcib_alloc_msi, fsl_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, fsl_pcib_release_msi), DEVMETHOD(pcib_alloc_msix, fsl_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, fsl_pcib_release_msix), DEVMETHOD(pcib_map_msi, fsl_pcib_map_msi), DEVMETHOD_END }; DEFINE_CLASS_1(pcib, fsl_pcib_driver, fsl_pcib_methods, sizeof(struct fsl_pcib_softc), ofw_pcib_driver); EARLY_DRIVER_MODULE(pcib, ofwbus, fsl_pcib_driver, 0, 0, BUS_PASS_BUS); static void fsl_pcib_err_intr(void *v) { struct fsl_pcib_softc *sc; device_t dev; uint32_t err_reg, clear_reg; uint8_t i; dev = (device_t)v; sc = device_get_softc(dev); clear_reg = 0; err_reg = bus_space_read_4(sc->sc_bst, sc->sc_bsh, REG_PEX_ERR_DR); /* Check which one error occurred */ for (i = 0; i < sizeof(pci_err)/sizeof(struct fsl_pcib_err_dr); i++) { if (err_reg & pci_err[i].err_dr_mask) { device_printf(dev, "PCI %d: report %s error\n", device_get_unit(dev), pci_err[i].msg); clear_reg |= pci_err[i].err_dr_mask; } } /* Clear pending errors */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PEX_ERR_DR, clear_reg); } static int fsl_pcib_probe(device_t dev) { if (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "pci") != 0) return (ENXIO); if (!(ofw_bus_is_compatible(dev, "fsl,mpc8540-pci") || ofw_bus_is_compatible(dev, "fsl,mpc8540-pcie") || ofw_bus_is_compatible(dev, "fsl,mpc8548-pcie") || ofw_bus_is_compatible(dev, "fsl,p5020-pcie") || ofw_bus_is_compatible(dev, "fsl,p5040-pcie") || ofw_bus_is_compatible(dev, "fsl,qoriq-pcie-v2.2") || ofw_bus_is_compatible(dev, "fsl,qoriq-pcie-v2.4") || ofw_bus_is_compatible(dev, "fsl,qoriq-pcie"))) return (ENXIO); 
device_set_desc(dev, "Freescale Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int fsl_pcib_attach(device_t dev) { struct fsl_pcib_softc *sc; phandle_t node; uint32_t cfgreg, brctl, ipreg; int do_reset, error, rid; uint8_t ltssm, capptr; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "could not map I/O memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); sc->sc_busnr = 0; ipreg = bus_read_4(sc->sc_res, REG_PEX_IP_BLK_REV1); sc->sc_ip_min = (ipreg & IP_MN_M) >> IP_MN_S; sc->sc_ip_maj = (ipreg & IP_MJ_M) >> IP_MJ_S; mtx_init(&sc->sc_cfg_mtx, "pcicfg", NULL, MTX_SPIN); cfgreg = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_VENDOR, 2); if (cfgreg != 0x1057 && cfgreg != 0x1957) goto err; capptr = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_CAP_PTR, 1); while (capptr != 0) { cfgreg = fsl_pcib_cfgread(sc, 0, 0, 0, capptr, 2); switch (cfgreg & 0xff) { case PCIY_PCIX: break; case PCIY_EXPRESS: sc->sc_pcie = 1; sc->sc_pcie_capreg = capptr; break; } capptr = (cfgreg >> 8) & 0xff; } node = ofw_bus_get_node(dev); /* * Initialize generic OF PCI interface (ranges, etc.) */ error = ofw_pcib_init(dev); if (error) return (error); /* * Configure decode windows for PCI(E) access. */ if (fsl_pcib_decode_win(node, sc) != 0) goto err; cfgreg = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_COMMAND, 2); cfgreg |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; fsl_pcib_cfgwrite(sc, 0, 0, 0, PCIR_COMMAND, cfgreg, 2); do_reset = 0; resource_int_value("pcib", device_get_unit(dev), "reset", &do_reset); if (do_reset) { /* Reset the bus. Needed for Radeon video cards. 
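The secondary bus reset bit is asserted for 100ms and then cleared, with another 100ms for devices to recover before any config access.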
*/ brctl = fsl_pcib_read_config(sc->sc_dev, 0, 0, 0, PCIR_BRIDGECTL_1, 1); brctl |= PCIB_BCR_SECBUS_RESET; fsl_pcib_write_config(sc->sc_dev, 0, 0, 0, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); brctl &= ~PCIB_BCR_SECBUS_RESET; fsl_pcib_write_config(sc->sc_dev, 0, 0, 0, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); } if (sc->sc_pcie) { ltssm = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_LTSSM, 1); if (ltssm < LTSSM_STAT_L0) { if (bootverbose) printf("PCI %d: no PCIE link, skipping\n", device_get_unit(dev)); return (0); } } /* Allocate irq */ rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_irq_res == NULL) { error = fsl_pcib_detach(dev); if (error != 0) { device_printf(dev, "Detach of the driver failed with error %d\n", error); } return (ENXIO); } /* Setup interrupt handler */ error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, fsl_pcib_err_intr, dev, &sc->sc_ih); if (error != 0) { device_printf(dev, "Could not setup irq, %d\n", error); sc->sc_ih = NULL; error = fsl_pcib_detach(dev); if (error != 0) { device_printf(dev, "Detach of the driver failed with error %d\n", error); } return (ENXIO); } fsl_pcib_err_init(dev); return (ofw_pcib_attach(dev)); err: return (ENXIO); } static uint32_t fsl_pcib_cfgread(struct fsl_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data; addr = CONFIG_ACCESS_ENABLE; addr |= (bus & 0xff) << 16; addr |= (slot & 0x1f) << 11; addr |= (func & 0x7) << 8; addr |= reg & 0xfc; if (sc->sc_pcie) addr |= (reg & 0xf00) << 16; mtx_lock_spin(&sc->sc_cfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_CFG_ADDR, addr); switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA)); break; default: data = ~0; break; } mtx_unlock_spin(&sc->sc_cfg_mtx); return (data); } static void fsl_pcib_cfgwrite(struct fsl_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr; addr = CONFIG_ACCESS_ENABLE; addr |= (bus & 0xff) << 16; addr |= (slot & 0x1f) << 11; addr |= (func & 0x7) << 8; addr |= reg & 0xfc; if (sc->sc_pcie) addr |= (reg & 0xf00) << 16; mtx_lock_spin(&sc->sc_cfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_CFG_ADDR, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_CFG_DATA, htole32(data)); break; } mtx_unlock_spin(&sc->sc_cfg_mtx); } #if 0 static void dump(struct fsl_pcib_softc *sc) { unsigned int i; #define RD(o) bus_space_read_4(sc->sc_bst, sc->sc_bsh, o) for (i = 0; i < 5; i++) { printf("POTAR%u =0x%08x\n", i, RD(REG_POTAR(i))); printf("POTEAR%u =0x%08x\n", i, RD(REG_POTEAR(i))); printf("POWBAR%u =0x%08x\n", i, RD(REG_POWBAR(i))); printf("POWAR%u =0x%08x\n", i, RD(REG_POWAR(i))); } printf("\n"); for (i = 1; i < 4; i++) { printf("PITAR%u =0x%08x\n", i, RD(REG_PITAR(i))); printf("PIWBAR%u =0x%08x\n", i, RD(REG_PIWBAR(i))); printf("PIWBEAR%u=0x%08x\n", i, RD(REG_PIWBEAR(i))); printf("PIWAR%u =0x%08x\n", i, RD(REG_PIWAR(i))); } printf("\n"); #undef RD for (i = 0; i < 0x48; i += 4) { printf("cfg%02x=0x%08x\n", i, fsl_pcib_cfgread(sc, 0, 0, 0, i, 4)); } } #endif static 
int fsl_pcib_maxslots(device_t dev) { struct fsl_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_pcie) ? 0 : PCI_SLOTMAX); } static uint32_t fsl_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct fsl_pcib_softc *sc = device_get_softc(dev); if (bus == sc->sc_busnr && !sc->sc_pcie && slot < PCI_SLOT_FIRST) return (~0); return (fsl_pcib_cfgread(sc, bus, slot, func, reg, bytes)); } static void fsl_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct fsl_pcib_softc *sc = device_get_softc(dev); if (bus == sc->sc_busnr && !sc->sc_pcie && slot < PCI_SLOT_FIRST) return; fsl_pcib_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static void fsl_pcib_inbound(struct fsl_pcib_softc *sc, int wnd, int tgt, uint64_t start, uint64_t size, uint64_t pci_start) { uint32_t attr, bar, tar; KASSERT(wnd > 0, ("%s: inbound window 0 is invalid", __func__)); attr = PIWAR_EN; switch (tgt) { case -1: attr &= ~PIWAR_EN; break; case PIWAR_TRGT_LOCAL: attr |= (ffsl(size) - 2); default: attr |= (tgt << PIWAR_TRGT_S); break; } tar = start >> 12; bar = pci_start >> 12; bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PITAR(wnd), tar); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PIWBEAR(wnd), 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PIWBAR(wnd), bar); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PIWAR(wnd), attr); } static void fsl_pcib_outbound(struct fsl_pcib_softc *sc, int wnd, int res, uint64_t start, uint64_t size, uint64_t pci_start) { uint32_t attr, bar, tar; switch (res) { case SYS_RES_MEMORY: attr = 0x80044000 | (ffsll(size) - 2); break; case SYS_RES_IOPORT: attr = 0x80088000 | (ffsll(size) - 2); break; default: attr = 0x0004401f; break; } bar = start >> 12; tar = pci_start >> 12; bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_POTAR(wnd), tar); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_POTEAR(wnd), 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_POWBAR(wnd), bar); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_POWAR(wnd), attr); } static void fsl_pcib_err_init(device_t dev) { struct fsl_pcib_softc *sc; uint16_t sec_stat, dsr; uint32_t dcr, err_en; sc = device_get_softc(dev); sec_stat = fsl_pcib_cfgread(sc, 0, 0, 0, PCIR_SECSTAT_1, 2); if (sec_stat) fsl_pcib_cfgwrite(sc, 0, 0, 0, PCIR_SECSTAT_1, 0xffff, 2); if (sc->sc_pcie) { /* Clear error bits */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PEX_MES_IER, 0xffffffff); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PEX_MES_DR, 0xffffffff); bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PEX_ERR_DR, 0xffffffff); dsr = fsl_pcib_cfgread(sc, 0, 0, 0, sc->sc_pcie_capreg + PCIER_DEVICE_STA, 2); if (dsr) fsl_pcib_cfgwrite(sc, 0, 0, 0, sc->sc_pcie_capreg + PCIER_DEVICE_STA, 0xffff, 2); /* Enable all errors reporting */ err_en = 0x00bfff00; bus_space_write_4(sc->sc_bst, sc->sc_bsh, REG_PEX_ERR_EN, err_en); /* Enable error reporting: URR, FER, NFER */ dcr = fsl_pcib_cfgread(sc, 0, 0, 0, sc->sc_pcie_capreg + PCIER_DEVICE_CTL, 4); dcr |= PCIEM_CTL_URR_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_NFER_ENABLE; fsl_pcib_cfgwrite(sc, 0, 0, 0, sc->sc_pcie_capreg + PCIER_DEVICE_CTL, dcr, 4); } } static int fsl_pcib_detach(device_t dev) { struct fsl_pcib_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); mtx_destroy(&sc->sc_cfg_mtx); - return (bus_generic_detach(dev)); + return (0); } static int fsl_pcib_decode_win(phandle_t node, struct fsl_pcib_softc *sc) { device_t dev; int error, i, trgt; 
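/* Program the controller's address translation: outbound windows map CPU physical ranges from the OFW "ranges" property into PCI memory/IO space, inbound windows map PCI accesses back to local RAM (and to the CCSR, for MSI), and a LAW is enabled to route each outbound range. */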
dev = sc->sc_dev; fsl_pcib_outbound(sc, 0, -1, 0, 0, 0); /* * Configure LAW decode windows. */ error = law_pci_target(sc->sc_res, &sc->sc_iomem_target, &sc->sc_ioport_target); if (error != 0) { device_printf(dev, "could not retrieve PCI LAW target info\n"); return (error); } for (i = 0; i < sc->pci_sc.sc_nrange; i++) { switch (sc->pci_sc.sc_range[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_CONFIG: continue; case OFW_PCI_PHYS_HI_SPACE_IO: trgt = sc->sc_ioport_target; fsl_pcib_outbound(sc, 2, SYS_RES_IOPORT, sc->pci_sc.sc_range[i].host, sc->pci_sc.sc_range[i].size, sc->pci_sc.sc_range[i].pci); sc->sc_ioport_start = sc->pci_sc.sc_range[i].pci; sc->sc_ioport_end = sc->pci_sc.sc_range[i].pci + sc->pci_sc.sc_range[i].size - 1; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: trgt = sc->sc_iomem_target; fsl_pcib_outbound(sc, 1, SYS_RES_MEMORY, sc->pci_sc.sc_range[i].host, sc->pci_sc.sc_range[i].size, sc->pci_sc.sc_range[i].pci); sc->sc_iomem_start = sc->pci_sc.sc_range[i].pci; sc->sc_iomem_end = sc->pci_sc.sc_range[i].pci + sc->pci_sc.sc_range[i].size - 1; break; default: panic("Unknown range type %#x\n", sc->pci_sc.sc_range[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK); } error = law_enable(trgt, sc->pci_sc.sc_range[i].host, sc->pci_sc.sc_range[i].size); if (error != 0) { device_printf(dev, "could not program LAW for range " "%d\n", i); return (error); } } /* * Set outbound and inbound windows. */ fsl_pcib_outbound(sc, 3, -1, 0, 0, 0); fsl_pcib_outbound(sc, 4, -1, 0, 0, 0); fsl_pcib_inbound(sc, 1, -1, 0, 0, 0); fsl_pcib_inbound(sc, 2, -1, 0, 0, 0); fsl_pcib_inbound(sc, 3, PIWAR_TRGT_LOCAL, 0, ptoa(Maxmem), 0); /* Direct-map the CCSR for MSIs. */ /* Freescale PCIe 2.x has a dedicated MSI window. */ /* inbound window 8 makes it hit 0xD00 offset, the MSI window. */ if (sc->sc_ip_maj >= 2) fsl_pcib_inbound(sc, 8, PIWAR_TRGT_CCSR, ccsrbar_pa, ccsrbar_size, ccsrbar_pa); else fsl_pcib_inbound(sc, 1, PIWAR_TRGT_CCSR, ccsrbar_pa, ccsrbar_size, ccsrbar_pa); return (0); } static int fsl_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount, int *irqs) { vmem_addr_t start; int err, i; if (msi_vmem == NULL) return (ENODEV); err = vmem_xalloc(msi_vmem, count, powerof2(count), 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &start); if (err) return (err); for (i = 0; i < count; i++) irqs[i] = start + i; return (0); } static int fsl_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { if (msi_vmem == NULL) return (ENODEV); vmem_xfree(msi_vmem, irqs[0], count); return (0); } static int fsl_pcib_alloc_msix(device_t dev, device_t child, int *irq) { return (fsl_pcib_alloc_msi(dev, child, 1, 1, irq)); } static int fsl_pcib_release_msix(device_t dev, device_t child, int irq) { return (fsl_pcib_release_msi(dev, child, 1, &irq)); } static int fsl_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct fsl_msi_map *mp; SLIST_FOREACH(mp, &fsl_msis, slist) { if (irq >= mp->irq_base && irq < mp->irq_base + FSL_NUM_MSIS) break; } if (mp == NULL) return (ENODEV); *data = (irq & 255); *addr = ccsrbar_pa + mp->target; return (0); } /* * Linux device trees put the msi@ as children of the SoC, with ranges based * on the CCSR. rman doesn't permit overlapping or sub-ranges between * devices (bus_space_subregion(9) could do it, but let's not touch the PIC * driver just to allocate a subregion for a sibling driver). This driver will * use ccsr_write() and ccsr_read() instead.
*/ #define FSL_NUM_IRQS 8 #define FSL_NUM_MSI_PER_IRQ 32 #define FSL_MSI_TARGET 0x140 struct fsl_msi_softc { vm_offset_t sc_base; vm_offset_t sc_target; int sc_msi_base_irq; struct fsl_msi_map sc_map; struct fsl_msi_irq { /* This struct gets passed as the filter private data. */ struct fsl_msi_softc *sc_ptr; /* Pointer back to softc. */ struct resource *res; int irq; void *cookie; int vectors[FSL_NUM_MSI_PER_IRQ]; vm_offset_t reg; } sc_msi_irq[FSL_NUM_IRQS]; }; static int fsl_msi_intr_filter(void *priv) { struct fsl_msi_irq *data = priv; uint32_t reg; int i; reg = ccsr_read4(ccsrbar_va + data->reg); i = 0; while (reg != 0) { if (reg & 1) powerpc_dispatch_intr(data->vectors[i], NULL); reg >>= 1; i++; } return (FILTER_HANDLED); } static int fsl_msi_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "fsl,mpic-msi")) return (ENXIO); device_set_desc(dev, "Freescale MSI"); return (BUS_PROBE_DEFAULT); } static int fsl_msi_attach(device_t dev) { struct fsl_msi_softc *sc; struct fsl_msi_irq *irq; int i; sc = device_get_softc(dev); if (msi_vmem == NULL) msi_vmem = vmem_create("MPIC MSI", 0, 0, 1, 0, M_BESTFIT | M_WAITOK); /* Manually play with resource entries. */ sc->sc_base = bus_get_resource_start(dev, SYS_RES_MEMORY, 0); sc->sc_map.target = bus_get_resource_start(dev, SYS_RES_MEMORY, 1); if (sc->sc_map.target == 0) sc->sc_map.target = sc->sc_base + FSL_MSI_TARGET; for (i = 0; i < FSL_NUM_IRQS; i++) { irq = &sc->sc_msi_irq[i]; irq->irq = i; irq->reg = sc->sc_base + 16 * i; irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->irq, RF_ACTIVE); bus_setup_intr(dev, irq->res, INTR_TYPE_MISC | INTR_MPSAFE, fsl_msi_intr_filter, NULL, irq, &irq->cookie); } sc->sc_map.irq_base = powerpc_register_pic(dev, ofw_bus_get_node(dev), FSL_NUM_MSIS, 0, 0); /* Let vmem and the IRQ subsystem work their magic for allocations. */ vmem_add(msi_vmem, sc->sc_map.irq_base, FSL_NUM_MSIS, M_WAITOK); SLIST_INSERT_HEAD(&fsl_msis, &sc->sc_map, slist); return (0); } static void fsl_msi_enable(device_t dev, u_int irq, u_int vector, void **priv) { struct fsl_msi_softc *sc; struct fsl_msi_irq *irqd; sc = device_get_softc(dev); irqd = &sc->sc_msi_irq[irq / FSL_NUM_MSI_PER_IRQ]; irqd->vectors[irq % FSL_NUM_MSI_PER_IRQ] = vector; } static device_method_t fsl_msi_methods[] = { DEVMETHOD(device_probe, fsl_msi_probe), DEVMETHOD(device_attach, fsl_msi_attach), DEVMETHOD(pic_enable, fsl_msi_enable), DEVMETHOD_END }; static driver_t fsl_msi_driver = { "fsl_msi", fsl_msi_methods, sizeof(struct fsl_msi_softc) }; EARLY_DRIVER_MODULE(fsl_msi, simplebus, fsl_msi_driver, 0, 0, BUS_PASS_INTERRUPT + 1); diff --git a/sys/powerpc/powermac/cuda.c b/sys/powerpc/powermac/cuda.c index 4d120747e0f0..367eb7a059c2 100644 --- a/sys/powerpc/powermac/cuda.c +++ b/sys/powerpc/powermac/cuda.c @@ -1,800 +1,805 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006 Michael Lorenz * Copyright 2008 by Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "cudavar.h" #include "viareg.h" /* * MacIO interface */ static int cuda_probe(device_t); static int cuda_attach(device_t); static int cuda_detach(device_t); static u_int cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data, u_char poll); static u_int cuda_adb_autopoll(device_t dev, uint16_t mask); static u_int cuda_poll(device_t dev); static void cuda_send_inbound(struct cuda_softc *sc); static void cuda_send_outbound(struct cuda_softc *sc); static void cuda_shutdown(void *xsc, int howto); /* * Clock interface */ static int cuda_gettime(device_t dev, struct timespec *ts); static int cuda_settime(device_t dev, struct timespec *ts); static device_method_t cuda_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cuda_probe), DEVMETHOD(device_attach, cuda_attach), DEVMETHOD(device_detach, cuda_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* ADB bus interface */ DEVMETHOD(adb_hb_send_raw_packet, cuda_adb_send), DEVMETHOD(adb_hb_controller_poll, cuda_poll), DEVMETHOD(adb_hb_set_autopoll_mask, cuda_adb_autopoll), /* Clock interface */ DEVMETHOD(clock_gettime, cuda_gettime), DEVMETHOD(clock_settime, cuda_settime), DEVMETHOD_END }; static driver_t cuda_driver = { "cuda", cuda_methods, sizeof(struct cuda_softc), }; DRIVER_MODULE(cuda, macio, cuda_driver, 0, 0); DRIVER_MODULE(adb, cuda, adb_driver, 0, 0); static void cuda_intr(void *arg); static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset); static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value); static void cuda_idle(struct cuda_softc *); static void cuda_tip(struct cuda_softc *); static void cuda_clear_tip(struct cuda_softc *); static void cuda_in(struct cuda_softc *); static void cuda_out(struct cuda_softc *); static void cuda_toggle_ack(struct cuda_softc *); static void cuda_ack_off(struct cuda_softc *); static int cuda_intr_state(struct cuda_softc *); static int cuda_probe(device_t dev) { const char *type = ofw_bus_get_type(dev); if (strcmp(type, "via-cuda") != 0) return (ENXIO); device_set_desc(dev, CUDA_DEVSTR); return (0); } static int cuda_attach(device_t dev) { struct cuda_softc *sc; volatile int i; uint8_t reg; phandle_t node,child; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_memrid = 0; sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); if 
(sc->sc_memr == NULL) { device_printf(dev, "Could not alloc mem resource!\n"); return (ENXIO); } sc->sc_irqrid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid, RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) { device_printf(dev, "could not setup interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); return (ENXIO); } mtx_init(&sc->sc_mutex,"cuda",NULL,MTX_DEF | MTX_RECURSE); sc->sc_sent = 0; sc->sc_received = 0; sc->sc_waiting = 0; sc->sc_polling = 0; sc->sc_state = CUDA_NOTREADY; sc->sc_autopoll = 0; sc->sc_rtc = -1; STAILQ_INIT(&sc->sc_inq); STAILQ_INIT(&sc->sc_outq); STAILQ_INIT(&sc->sc_freeq); for (i = 0; i < CUDA_MAXPACKETS; i++) STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q); /* Init CUDA */ reg = cuda_read_reg(sc, vDirB); reg |= 0x30; /* register B bits 4 and 5: outputs */ cuda_write_reg(sc, vDirB, reg); reg = cuda_read_reg(sc, vDirB); reg &= 0xf7; /* register B bit 3: input */ cuda_write_reg(sc, vDirB, reg); reg = cuda_read_reg(sc, vACR); reg &= ~vSR_OUT; /* make sure SR is set to IN */ cuda_write_reg(sc, vACR, reg); cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10); sc->sc_state = CUDA_IDLE; /* used by all types of hardware */ cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */ cuda_idle(sc); /* reset ADB */ /* Reset CUDA */ i = cuda_read_reg(sc, vSR); /* clear interrupt */ cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */ cuda_idle(sc); /* reset state to idle */ DELAY(150); cuda_tip(sc); /* signal start of frame */ DELAY(150); cuda_toggle_ack(sc); DELAY(150); cuda_clear_tip(sc); DELAY(150); cuda_idle(sc); /* back to idle state */ i = cuda_read_reg(sc, vSR); /* clear interrupt */ cuda_write_reg(sc, vIER, 0x84); /* ints ok now */ /* Initialize child buses (ADB) */ node = ofw_bus_get_node(dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { char name[32]; memset(name, 0, sizeof(name)); OF_getprop(child, "name", name, sizeof(name)); if (bootverbose) device_printf(dev, "CUDA child <%s>\n",name); if (strncmp(name, "adb", 4) == 0) { sc->adb_bus = device_add_child(dev,"adb",DEVICE_UNIT_ANY); } } clock_register(dev, 1000); EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc, SHUTDOWN_PRI_LAST); return (bus_generic_attach(dev)); } static int cuda_detach(device_t dev) { struct cuda_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr); mtx_destroy(&sc->sc_mutex); - return (bus_generic_detach(dev)); + return (0); } static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset) { return (bus_read_1(sc->sc_memr, offset)); } static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value) { bus_write_1(sc->sc_memr, offset, value); } static void cuda_idle(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vBufB); reg |= (vPB4 | vPB5); cuda_write_reg(sc, vBufB, reg); } static void cuda_tip(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vBufB); reg 
&= ~vPB5; cuda_write_reg(sc, vBufB, reg); } static void cuda_clear_tip(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vBufB); reg |= vPB5; cuda_write_reg(sc, vBufB, reg); } static void cuda_in(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vACR); reg &= ~vSR_OUT; cuda_write_reg(sc, vACR, reg); } static void cuda_out(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vACR); reg |= vSR_OUT; cuda_write_reg(sc, vACR, reg); } static void cuda_toggle_ack(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vBufB); reg ^= vPB4; cuda_write_reg(sc, vBufB, reg); } static void cuda_ack_off(struct cuda_softc *sc) { uint8_t reg; reg = cuda_read_reg(sc, vBufB); reg |= vPB4; cuda_write_reg(sc, vBufB, reg); } static int cuda_intr_state(struct cuda_softc *sc) { return ((cuda_read_reg(sc, vBufB) & vPB3) == 0); } static int cuda_send(void *cookie, int poll, int length, uint8_t *msg) { struct cuda_softc *sc = cookie; device_t dev = sc->sc_dev; struct cuda_packet *pkt; if (sc->sc_state == CUDA_NOTREADY) return (-1); mtx_lock(&sc->sc_mutex); pkt = STAILQ_FIRST(&sc->sc_freeq); if (pkt == NULL) { mtx_unlock(&sc->sc_mutex); return (-1); } pkt->len = length - 1; pkt->type = msg[0]; memcpy(pkt->data, &msg[1], pkt->len); STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q); STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q); /* * If we already are sending a packet, we should bail now that this * one has been added to the queue. */ if (sc->sc_waiting) { mtx_unlock(&sc->sc_mutex); return (0); } cuda_send_outbound(sc); mtx_unlock(&sc->sc_mutex); if (sc->sc_polling || poll || cold) cuda_poll(dev); return (0); } static void cuda_send_outbound(struct cuda_softc *sc) { struct cuda_packet *pkt; mtx_assert(&sc->sc_mutex, MA_OWNED); pkt = STAILQ_FIRST(&sc->sc_outq); if (pkt == NULL) return; sc->sc_out_length = pkt->len + 1; memcpy(sc->sc_out, &pkt->type, pkt->len + 1); sc->sc_sent = 0; STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q); STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q); sc->sc_waiting = 1; cuda_poll(sc->sc_dev); DELAY(150); if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) { sc->sc_state = CUDA_OUT; cuda_out(sc); cuda_write_reg(sc, vSR, sc->sc_out[0]); cuda_ack_off(sc); cuda_tip(sc); } } static void cuda_send_inbound(struct cuda_softc *sc) { device_t dev; struct cuda_packet *pkt; dev = sc->sc_dev; mtx_lock(&sc->sc_mutex); while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q); mtx_unlock(&sc->sc_mutex); /* check if we have a handler for this message */ switch (pkt->type) { case CUDA_ADB: if (pkt->len > 2) { adb_receive_raw_packet(sc->adb_bus, pkt->data[0],pkt->data[1], pkt->len - 2,&pkt->data[2]); } else { adb_receive_raw_packet(sc->adb_bus, pkt->data[0],pkt->data[1],0,NULL); } break; case CUDA_PSEUDO: mtx_lock(&sc->sc_mutex); switch (pkt->data[1]) { case CMD_AUTOPOLL: sc->sc_autopoll = 1; break; case CMD_READ_RTC: memcpy(&sc->sc_rtc, &pkt->data[2], sizeof(sc->sc_rtc)); wakeup(&sc->sc_rtc); break; case CMD_WRITE_RTC: break; } mtx_unlock(&sc->sc_mutex); break; case CUDA_ERROR: /* * CUDA will throw errors if we miss a race between * sending and receiving packets. This is already * handled when we abort packet output to handle * this packet in cuda_intr(). Thus, we ignore * these messages. 
*/ break; default: device_printf(dev,"unknown CUDA command %d\n", pkt->type); break; } mtx_lock(&sc->sc_mutex); STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q); } mtx_unlock(&sc->sc_mutex); } static u_int cuda_poll(device_t dev) { struct cuda_softc *sc = device_get_softc(dev); if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) && !sc->sc_waiting) return (0); cuda_intr(dev); return (0); } static void cuda_intr(void *arg) { device_t dev; struct cuda_softc *sc; int ending, process_inbound; uint8_t reg; dev = (device_t)arg; sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); process_inbound = 0; reg = cuda_read_reg(sc, vIFR); if ((reg & vSR_INT) != vSR_INT) { mtx_unlock(&sc->sc_mutex); return; } cuda_write_reg(sc, vIFR, 0x7f); /* Clear interrupt */ switch_start: switch (sc->sc_state) { case CUDA_IDLE: /* * This is an unexpected packet, so grab the first (dummy) * byte, set up the proper vars, and tell the chip we are * starting to receive the packet by setting the TIP bit. */ sc->sc_in[1] = cuda_read_reg(sc, vSR); if (cuda_intr_state(sc) == 0) { /* must have been a fake start */ if (sc->sc_waiting) { /* start over */ DELAY(150); sc->sc_state = CUDA_OUT; sc->sc_sent = 0; cuda_out(sc); cuda_write_reg(sc, vSR, sc->sc_out[1]); cuda_ack_off(sc); cuda_tip(sc); } break; } cuda_in(sc); cuda_tip(sc); sc->sc_received = 1; sc->sc_state = CUDA_IN; break; case CUDA_IN: sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR); ending = 0; if (sc->sc_received > 255) { /* bitch only once */ if (sc->sc_received == 256) { device_printf(dev,"input overflow\n"); ending = 1; } } else sc->sc_received++; /* intr off means this is the last byte (end of frame) */ if (cuda_intr_state(sc) == 0) { ending = 1; } else { cuda_toggle_ack(sc); } if (ending == 1) { /* end of message? */ struct cuda_packet *pkt; /* reset vars and signal the end of this frame */ cuda_idle(sc); /* Queue up the packet */ pkt = STAILQ_FIRST(&sc->sc_freeq); if (pkt != NULL) { /* If we have a free packet, process it */ pkt->len = sc->sc_received - 2; pkt->type = sc->sc_in[1]; memcpy(pkt->data, &sc->sc_in[2], pkt->len); STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q); STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q); process_inbound = 1; } sc->sc_state = CUDA_IDLE; sc->sc_received = 0; /* * If there is something waiting to be sent out, * set everything up and send the first byte. */ if (sc->sc_waiting == 1) { DELAY(1500); /* required */ sc->sc_sent = 0; sc->sc_state = CUDA_OUT; /* * If the interrupt is on, we were too slow * and the chip has already started to send * something to us, so back out of the write * and start a read cycle. */ if (cuda_intr_state(sc)) { cuda_in(sc); cuda_idle(sc); sc->sc_sent = 0; sc->sc_state = CUDA_IDLE; sc->sc_received = 0; DELAY(150); goto switch_start; } /* * If we got here, it's ok to start sending * so load the first byte and tell the chip * we want to send. 
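* * Concretely, in terms of the helpers above: cuda_out() flips the VIA shift register to output, the vSR write below loads the byte, and cuda_ack_off() plus cuda_tip() drive the handshake lines to open the transfer; each following byte is sent from the CUDA_OUT state by writing vSR and toggling ACK.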
*/ cuda_out(sc); cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]); cuda_ack_off(sc); cuda_tip(sc); } } break; case CUDA_OUT: cuda_read_reg(sc, vSR); /* reset SR-intr in IFR */ sc->sc_sent++; if (cuda_intr_state(sc)) { /* ADB intr low during write */ cuda_in(sc); /* make sure SR is set to IN */ cuda_idle(sc); sc->sc_sent = 0; /* must start all over */ sc->sc_state = CUDA_IDLE; /* new state */ sc->sc_received = 0; sc->sc_waiting = 1; /* must retry when done with * read */ DELAY(150); goto switch_start; /* process next state right * now */ break; } if (sc->sc_out_length == sc->sc_sent) { /* check for done */ sc->sc_waiting = 0; /* done writing */ sc->sc_state = CUDA_IDLE; /* signal bus is idle */ cuda_in(sc); cuda_idle(sc); } else { /* send next byte */ cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]); cuda_toggle_ack(sc); /* signal byte ready to * shift */ } break; case CUDA_NOTREADY: break; default: break; } mtx_unlock(&sc->sc_mutex); if (process_inbound) cuda_send_inbound(sc); mtx_lock(&sc->sc_mutex); /* If we have another packet waiting, set it up */ if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE) cuda_send_outbound(sc); mtx_unlock(&sc->sc_mutex); } static u_int cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data, u_char poll) { struct cuda_softc *sc = device_get_softc(dev); uint8_t packet[16]; int i; /* construct an ADB command packet and send it */ packet[0] = CUDA_ADB; packet[1] = command_byte; for (i = 0; i < len; i++) packet[i + 2] = data[i]; cuda_send(sc, poll, len + 2, packet); return (0); } static u_int cuda_adb_autopoll(device_t dev, uint16_t mask) { struct cuda_softc *sc = device_get_softc(dev); uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0}; mtx_lock(&sc->sc_mutex); if (cmd[2] == sc->sc_autopoll) { mtx_unlock(&sc->sc_mutex); return (0); } sc->sc_autopoll = -1; cuda_send(sc, 1, 3, cmd); mtx_unlock(&sc->sc_mutex); return (0); } static void cuda_shutdown(void *xsc, int howto) { struct cuda_softc *sc = xsc; uint8_t cmd[] = {CUDA_PSEUDO, 0}; if ((howto & RB_POWEROFF) != 0) cmd[1] = CMD_POWEROFF; else if ((howto & RB_HALT) == 0) cmd[1] = CMD_RESET; else return; cuda_poll(sc->sc_dev); cuda_send(sc, 1, 2, cmd); while (1) cuda_poll(sc->sc_dev); } #define DIFF19041970 2082844800 static int cuda_gettime(device_t dev, struct timespec *ts) { struct cuda_softc *sc = device_get_softc(dev); uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC}; mtx_lock(&sc->sc_mutex); sc->sc_rtc = -1; cuda_send(sc, 1, 2, cmd); if (sc->sc_rtc == -1) mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100); ts->tv_sec = sc->sc_rtc - DIFF19041970; ts->tv_nsec = 0; mtx_unlock(&sc->sc_mutex); return (0); } static int cuda_settime(device_t dev, struct timespec *ts) { struct cuda_softc *sc = device_get_softc(dev); uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0}; uint32_t sec; sec = ts->tv_sec + DIFF19041970; memcpy(&cmd[2], &sec, sizeof(sec)); mtx_lock(&sc->sc_mutex); cuda_send(sc, 0, 6, cmd); mtx_unlock(&sc->sc_mutex); return (0); } diff --git a/sys/powerpc/powermac/pmu.c b/sys/powerpc/powermac/pmu.c index 2d38545a7046..e0358c6f9887 100644 --- a/sys/powerpc/powermac/pmu.c +++ b/sys/powerpc/powermac/pmu.c @@ -1,1140 +1,1145 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 Michael Lorenz * Copyright 2008 by Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "pmuvar.h" #include "viareg.h" #include "uninorthvar.h" /* For unin_chip_sleep()/unin_chip_wake() */ #define PMU_DEFAULTS PMU_INT_TICK | PMU_INT_ADB | \ PMU_INT_PCEJECT | PMU_INT_SNDBRT | \ PMU_INT_BATTERY | PMU_INT_ENVIRONMENT /* * Bus interface */ static int pmu_probe(device_t); static int pmu_attach(device_t); static int pmu_detach(device_t); /* * Clock interface */ static int pmu_gettime(device_t dev, struct timespec *ts); static int pmu_settime(device_t dev, struct timespec *ts); /* * ADB Interface */ static u_int pmu_adb_send(device_t dev, u_char command_byte, int len, u_char *data, u_char poll); static u_int pmu_adb_autopoll(device_t dev, uint16_t mask); static u_int pmu_poll(device_t dev); /* * Power interface */ static void pmu_shutdown(void *xsc, int howto); static void pmu_set_sleepled(void *xsc, int onoff); static int pmu_server_mode(SYSCTL_HANDLER_ARGS); static int pmu_acline_state(SYSCTL_HANDLER_ARGS); static int pmu_query_battery(struct pmu_softc *sc, int batt, struct pmu_battstate *info); static int pmu_battquery_sysctl(SYSCTL_HANDLER_ARGS); static int pmu_battmon(SYSCTL_HANDLER_ARGS); static void pmu_battquery_proc(void); static void pmu_battery_notify(struct pmu_battstate *batt, struct pmu_battstate *old); /* * List of battery-related sysctls we might ask for */ enum { PMU_BATSYSCTL_PRESENT = 1 << 8, PMU_BATSYSCTL_CHARGING = 2 << 8, PMU_BATSYSCTL_CHARGE = 3 << 8, PMU_BATSYSCTL_MAXCHARGE = 4 << 8, PMU_BATSYSCTL_CURRENT = 5 << 8, PMU_BATSYSCTL_VOLTAGE = 6 << 8, PMU_BATSYSCTL_TIME = 7 << 8, PMU_BATSYSCTL_LIFE = 8 << 8 }; static device_method_t pmu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pmu_probe), DEVMETHOD(device_attach, pmu_attach), DEVMETHOD(device_detach, pmu_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* ADB bus interface */ DEVMETHOD(adb_hb_send_raw_packet, pmu_adb_send), DEVMETHOD(adb_hb_controller_poll, pmu_poll), DEVMETHOD(adb_hb_set_autopoll_mask, pmu_adb_autopoll), /* Clock interface */ DEVMETHOD(clock_gettime, pmu_gettime), DEVMETHOD(clock_settime, pmu_settime), DEVMETHOD_END }; static driver_t pmu_driver = { "pmu", pmu_methods, sizeof(struct pmu_softc), }; 
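/* * A note on the PMU_BATSYSCTL_* encoding above: each battery sysctl passes arg2 with the selector in the high byte and the battery index in the low byte (e.g. PMU_BATSYSCTL_VOLTAGE | 1 queries battery 1's voltage); pmu_battquery_sysctl() below splits them apart again with arg2 & 0xff00 and arg2 & 0x00ff. */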
EARLY_DRIVER_MODULE(pmu, macio, pmu_driver, 0, 0, BUS_PASS_RESOURCE); DRIVER_MODULE(adb, pmu, adb_driver, 0, 0); static int pmuextint_probe(device_t); static int pmuextint_attach(device_t); static device_method_t pmuextint_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pmuextint_probe), DEVMETHOD(device_attach, pmuextint_attach), {0,0} }; static driver_t pmuextint_driver = { "pmuextint", pmuextint_methods, 0 }; EARLY_DRIVER_MODULE(pmuextint, macgpio, pmuextint_driver, 0, 0, BUS_PASS_RESOURCE); /* Make sure uhid is loaded, as it turns off some of the ADB emulation */ MODULE_DEPEND(pmu, usb, 1, 1, 1); static void pmu_intr(void *arg); static void pmu_in(struct pmu_softc *sc); static void pmu_out(struct pmu_softc *sc); static void pmu_ack_on(struct pmu_softc *sc); static void pmu_ack_off(struct pmu_softc *sc); static int pmu_send(void *cookie, int cmd, int length, uint8_t *in_msg, int rlen, uint8_t *out_msg); static uint8_t pmu_read_reg(struct pmu_softc *sc, u_int offset); static void pmu_write_reg(struct pmu_softc *sc, u_int offset, uint8_t value); static int pmu_intr_state(struct pmu_softc *); /* these values show the number of data bytes returned after a 'send' cmd is sent */ static signed char pm_send_cmd_type[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0x01, 0x01, -1, -1, -1, -1, -1, -1, 0x00, 0x00, -1, -1, -1, -1, -1, 0x00, -1, 0x00, 0x02, 0x01, 0x01, -1, -1, -1, 0x00, -1, -1, -1, -1, -1, -1, -1, 0x04, 0x14, -1, 0x03, -1, -1, -1, -1, 0x00, 0x00, 0x02, 0x02, -1, -1, -1, -1, 0x01, 0x01, -1, -1, -1, -1, -1, -1, 0x00, 0x00, -1, -1, 0x01, -1, -1, -1, 0x01, 0x00, 0x02, 0x02, -1, 0x01, 0x03, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, -1, -1, -1, 0x02, -1, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, 0x01, 0x01, 0x01, -1, -1, -1, -1, -1, 0x00, 0x00, -1, -1, -1, 0x05, 0x04, 0x04, 0x04, -1, 0x00, -1, -1, -1, -1, -1, 0x00, -1, -1, -1, -1, -1, -1, -1, 0x01, 0x02, -1, -1, -1, -1, -1, -1, 0x00, 0x00, -1, -1, -1, -1, -1, -1, 0x02, 0x02, 0x02, 0x04, -1, 0x00, -1, -1, 0x01, 0x01, 0x03, 0x02, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0x00, -1, -1, -1, -1, -1, -1, -1, 0x01, 0x01, -1, -1, 0x00, 0x00, -1, -1, -1, 0x04, 0x00, -1, -1, -1, -1, -1, 0x03, -1, 0x00, -1, 0x00, -1, -1, 0x00, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; /* these values show the number of data bytes returned after a 'receive' cmd is sent */ static signed char pm_receive_cmd_type[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x15, -1, 0x02, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x03, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x03, 0x09, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, -1, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, -1,
-1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, -1, -1, 0x02, -1, -1, -1, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, 0x02, -1, -1, -1, -1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -1, -1, -1, -1, -1, -1, -1, -1, }; static int pmu_battmon_enabled = 1; static struct proc *pmubattproc; static struct kproc_desc pmu_batt_kp = { "pmu_batt", pmu_battquery_proc, &pmubattproc }; /* We only have one of each device, so globals are safe */ static device_t pmu = NULL; static device_t pmu_extint = NULL; static int pmuextint_probe(device_t dev) { const char *type = ofw_bus_get_type(dev); if (strcmp(type, "extint-gpio1") != 0) return (ENXIO); device_set_desc(dev, "Apple PMU99 External Interrupt"); return (0); } static int pmu_probe(device_t dev) { const char *type = ofw_bus_get_type(dev); if (strcmp(type, "via-pmu") != 0) return (ENXIO); device_set_desc(dev, "Apple PMU99 Controller"); return (0); } static int setup_pmu_intr(device_t dev, device_t extint) { struct pmu_softc *sc; sc = device_get_softc(dev); sc->sc_irqrid = 0; sc->sc_irq = bus_alloc_resource_any(extint, SYS_RES_IRQ, &sc->sc_irqrid, RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, pmu_intr, dev, &sc->sc_ih) != 0) { device_printf(dev, "could not setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); return (ENXIO); } return (0); } static int pmuextint_attach(device_t dev) { pmu_extint = dev; if (pmu) return (setup_pmu_intr(pmu,dev)); return (0); } static int pmu_attach(device_t dev) { struct pmu_softc *sc; int i; uint8_t reg; uint8_t cmd[2] = {2, 0}; uint8_t resp[16]; phandle_t node,child; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_memrid = 0; sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); mtx_init(&sc->sc_mutex,"pmu",NULL,MTX_DEF | MTX_RECURSE); if (sc->sc_memr == NULL) { device_printf(dev, "Could not alloc mem resource!\n"); return (ENXIO); } /* * Our interrupt is attached to a GPIO pin. Depending on probe order, * we may not have found it yet. If we haven't, it will find us, and * attach our interrupt then. 
*/ pmu = dev; if (pmu_extint != NULL) { if (setup_pmu_intr(dev,pmu_extint) != 0) return (ENXIO); } sc->sc_autopoll = 0; sc->sc_batteries = 0; sc->adb_bus = NULL; sc->sc_leddev = NULL; /* Init PMU */ pmu_write_reg(sc, vBufB, pmu_read_reg(sc, vBufB) | vPB4); pmu_write_reg(sc, vDirB, (pmu_read_reg(sc, vDirB) | vPB4) & ~vPB3); reg = PMU_DEFAULTS; pmu_send(sc, PMU_SET_IMASK, 1, &reg, 16, resp); pmu_write_reg(sc, vIER, 0x94); /* make sure VIA interrupts are on */ pmu_send(sc, PMU_SYSTEM_READY, 1, cmd, 16, resp); pmu_send(sc, PMU_GET_VERSION, 0, cmd, 16, resp); /* Initialize child buses (ADB) */ node = ofw_bus_get_node(dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { char name[32]; memset(name, 0, sizeof(name)); OF_getprop(child, "name", name, sizeof(name)); if (bootverbose) device_printf(dev, "PMU child <%s>\n",name); if (strncmp(name, "adb", 4) == 0) { sc->adb_bus = device_add_child(dev,"adb",DEVICE_UNIT_ANY); } if (strncmp(name, "power-mgt", 9) == 0) { uint32_t prim_info[9]; if (OF_getprop(child, "prim-info", prim_info, sizeof(prim_info)) >= 7) sc->sc_batteries = (prim_info[6] >> 16) & 0xff; if (bootverbose && sc->sc_batteries > 0) device_printf(dev, "%d batteries detected\n", sc->sc_batteries); } } /* * Set up sysctls */ ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "server_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, pmu_server_mode, "I", "Enable reboot after power failure"); if (sc->sc_batteries > 0) { struct sysctl_oid *oid, *battroot; char battnum[2]; /* Only start the battery monitor if we have a battery. */ kproc_start(&pmu_batt_kp); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "monitor_batteries", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, pmu_battmon, "I", "Post battery events to devd"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "acline", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, pmu_acline_state, "I", "AC Line Status"); battroot = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "batteries", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Battery Information"); for (i = 0; i < sc->sc_batteries; i++) { battnum[0] = i + '0'; battnum[1] = '\0'; oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(battroot), OID_AUTO, battnum, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Battery Information"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "present", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_PRESENT | i, pmu_battquery_sysctl, "I", "Battery present"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "charging", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_CHARGING | i, pmu_battquery_sysctl, "I", "Battery charging"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "charge", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_CHARGE | i, pmu_battquery_sysctl, "I", "Battery charge (mAh)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "maxcharge", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_MAXCHARGE | i, pmu_battquery_sysctl, "I", "Maximum battery capacity (mAh)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "rate", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_CURRENT | i, pmu_battquery_sysctl, "I", "Battery discharge rate (mA)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "voltage", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_VOLTAGE | i, pmu_battquery_sysctl, "I", "Battery voltage (mV)"); /* Knobs for mental compatibility with ACPI */
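/* The arithmetic behind these two knobs lives in pmu_battquery_sysctl() below: time remaining is computed as mAh * 60 / mA (maxcharge - charge against the current while charging, charge against the negated current while discharging), and life is charge * 100 / maxcharge. */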
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "time", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_TIME | i, pmu_battquery_sysctl, "I", "Time Remaining (minutes)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "life", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, PMU_BATSYSCTL_LIFE | i, pmu_battquery_sysctl, "I", "Capacity remaining (percent)"); } } /* * Set up LED interface */ sc->sc_leddev = led_create(pmu_set_sleepled, sc, "sleepled"); /* * Register RTC */ clock_register(dev, 1000); /* * Register power control handler */ EVENTHANDLER_REGISTER(shutdown_final, pmu_shutdown, sc, SHUTDOWN_PRI_LAST); return (bus_generic_attach(dev)); } static int pmu_detach(device_t dev) { struct pmu_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); if (sc->sc_leddev != NULL) led_destroy(sc->sc_leddev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr); mtx_destroy(&sc->sc_mutex); - return (bus_generic_detach(dev)); + return (0); } static uint8_t pmu_read_reg(struct pmu_softc *sc, u_int offset) { return (bus_read_1(sc->sc_memr, offset)); } static void pmu_write_reg(struct pmu_softc *sc, u_int offset, uint8_t value) { bus_write_1(sc->sc_memr, offset, value); } static int pmu_send_byte(struct pmu_softc *sc, uint8_t data) { pmu_out(sc); pmu_write_reg(sc, vSR, data); pmu_ack_off(sc); /* wait for intr to come up */ /* XXX should add a timeout and bail if it expires */ do {} while (pmu_intr_state(sc) == 0); pmu_ack_on(sc); do {} while (pmu_intr_state(sc)); pmu_ack_on(sc); return 0; } static inline int pmu_read_byte(struct pmu_softc *sc, uint8_t *data) { pmu_in(sc); (void)pmu_read_reg(sc, vSR); pmu_ack_off(sc); /* wait for intr to come up */ do {} while (pmu_intr_state(sc) == 0); pmu_ack_on(sc); do {} while (pmu_intr_state(sc)); *data = pmu_read_reg(sc, vSR); return 0; } static int pmu_intr_state(struct pmu_softc *sc) { return ((pmu_read_reg(sc, vBufB) & vPB3) == 0); } static int pmu_send(void *cookie, int cmd, int length, uint8_t *in_msg, int rlen, uint8_t *out_msg) { struct pmu_softc *sc = cookie; int i, rcv_len = -1; uint8_t out_len, intreg; intreg = pmu_read_reg(sc, vIER); intreg &= 0x10; pmu_write_reg(sc, vIER, intreg); /* wait idle */ do {} while (pmu_intr_state(sc)); /* send command */ pmu_send_byte(sc, cmd); /* send length if necessary */ if (pm_send_cmd_type[cmd] < 0) { pmu_send_byte(sc, length); } for (i = 0; i < length; i++) { pmu_send_byte(sc, in_msg[i]); } /* see if there's data to read */ rcv_len = pm_receive_cmd_type[cmd]; if (rcv_len == 0) goto done; /* read command */ if (rcv_len == 1) { pmu_read_byte(sc, out_msg); goto done; } else out_msg[0] = cmd; if (rcv_len < 0) { pmu_read_byte(sc, &out_len); rcv_len = out_len + 1; } for (i = 1; i < min(rcv_len, rlen); i++) pmu_read_byte(sc, &out_msg[i]); done: pmu_write_reg(sc, vIER, (intreg == 0) ? 
0 : 0x90); return rcv_len; } static u_int pmu_poll(device_t dev) { pmu_intr(dev); return (0); } static void pmu_in(struct pmu_softc *sc) { uint8_t reg; reg = pmu_read_reg(sc, vACR); reg &= ~vSR_OUT; reg |= 0x0c; pmu_write_reg(sc, vACR, reg); } static void pmu_out(struct pmu_softc *sc) { uint8_t reg; reg = pmu_read_reg(sc, vACR); reg |= vSR_OUT; reg |= 0x0c; pmu_write_reg(sc, vACR, reg); } static void pmu_ack_off(struct pmu_softc *sc) { uint8_t reg; reg = pmu_read_reg(sc, vBufB); reg &= ~vPB4; pmu_write_reg(sc, vBufB, reg); } static void pmu_ack_on(struct pmu_softc *sc) { uint8_t reg; reg = pmu_read_reg(sc, vBufB); reg |= vPB4; pmu_write_reg(sc, vBufB, reg); } static void pmu_intr(void *arg) { device_t dev; struct pmu_softc *sc; unsigned int len; uint8_t resp[16]; uint8_t junk[16]; dev = (device_t)arg; sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); pmu_write_reg(sc, vIFR, 0x90); /* Clear 'em */ len = pmu_send(sc, PMU_INT_ACK, 0, NULL, 16, resp); mtx_unlock(&sc->sc_mutex); if ((len < 1) || (resp[1] == 0)) { return; } if (resp[1] & PMU_INT_ADB) { /* * the PMU will turn off autopolling after each command that * it did not issue, so we assume any but TALK R0 is ours and * re-enable autopoll here whenever we receive an ACK for a * non TR0 command. */ mtx_lock(&sc->sc_mutex); if ((resp[2] & 0x0f) != (ADB_COMMAND_TALK << 2)) { if (sc->sc_autopoll) { uint8_t cmd[] = {0, PMU_SET_POLL_MASK, (sc->sc_autopoll >> 8) & 0xff, sc->sc_autopoll & 0xff}; pmu_send(sc, PMU_ADB_CMD, 4, cmd, 16, junk); } } mtx_unlock(&sc->sc_mutex); adb_receive_raw_packet(sc->adb_bus,resp[1],resp[2], len - 3,&resp[3]); } if (resp[1] & PMU_INT_ENVIRONMENT) { /* if the lid was just closed, notify devd. */ if ((resp[2] & PMU_ENV_LID_CLOSED) && (!sc->lid_closed)) { sc->lid_closed = 1; devctl_notify("PMU", "lid", "close", NULL); } else if (!(resp[2] & PMU_ENV_LID_CLOSED) && (sc->lid_closed)) { /* if the lid was just opened, notify devd. 
*/ sc->lid_closed = 0; devctl_notify("PMU", "lid", "open", NULL); } if (resp[2] & PMU_ENV_POWER) devctl_notify("PMU", "Button", "pressed", NULL); } } static u_int pmu_adb_send(device_t dev, u_char command_byte, int len, u_char *data, u_char poll) { struct pmu_softc *sc = device_get_softc(dev); int i; uint8_t packet[16], resp[16]; /* construct an ADB command packet and send it */ packet[0] = command_byte; packet[1] = 0; packet[2] = len; for (i = 0; i < len; i++) packet[i + 3] = data[i]; mtx_lock(&sc->sc_mutex); pmu_send(sc, PMU_ADB_CMD, len + 3, packet, 16, resp); mtx_unlock(&sc->sc_mutex); if (poll) pmu_poll(dev); return 0; } static u_int pmu_adb_autopoll(device_t dev, uint16_t mask) { struct pmu_softc *sc = device_get_softc(dev); /* magical incantation to re-enable autopolling */ uint8_t cmd[] = {0, PMU_SET_POLL_MASK, (mask >> 8) & 0xff, mask & 0xff}; uint8_t resp[16]; mtx_lock(&sc->sc_mutex); if (sc->sc_autopoll == mask) { mtx_unlock(&sc->sc_mutex); return 0; } sc->sc_autopoll = mask & 0xffff; if (mask) pmu_send(sc, PMU_ADB_CMD, 4, cmd, 16, resp); else pmu_send(sc, PMU_ADB_POLL_OFF, 0, NULL, 16, resp); mtx_unlock(&sc->sc_mutex); return 0; } static void pmu_shutdown(void *xsc, int howto) { struct pmu_softc *sc = xsc; uint8_t cmd[] = {'M', 'A', 'T', 'T'}; if ((howto & RB_POWEROFF) != 0) pmu_send(sc, PMU_POWER_OFF, 4, cmd, 0, NULL); else if ((howto & RB_HALT) == 0) pmu_send(sc, PMU_RESET_CPU, 0, NULL, 0, NULL); else return; for (;;); } static void pmu_set_sleepled(void *xsc, int onoff) { struct pmu_softc *sc = xsc; uint8_t cmd[] = {4, 0, 0}; cmd[2] = onoff; mtx_lock(&sc->sc_mutex); pmu_send(sc, PMU_SET_SLEEPLED, 3, cmd, 0, NULL); mtx_unlock(&sc->sc_mutex); } static int pmu_server_mode(SYSCTL_HANDLER_ARGS) { struct pmu_softc *sc = arg1; u_int server_mode = 0; uint8_t getcmd[] = {PMU_PWR_GET_POWERUP_EVENTS}; uint8_t setcmd[] = {0, 0, PMU_PWR_WAKEUP_AC_INSERT}; uint8_t resp[3]; int error, len; mtx_lock(&sc->sc_mutex); len = pmu_send(sc, PMU_POWER_EVENTS, 1, getcmd, 3, resp); mtx_unlock(&sc->sc_mutex); if (len == 3) server_mode = (resp[2] & PMU_PWR_WAKEUP_AC_INSERT) ? 
1 : 0; error = sysctl_handle_int(oidp, &server_mode, 0, req); if (len != 3) return (EINVAL); if (error || !req->newptr) return (error); if (server_mode == 1) setcmd[0] = PMU_PWR_SET_POWERUP_EVENTS; else if (server_mode == 0) setcmd[0] = PMU_PWR_CLR_POWERUP_EVENTS; else return (EINVAL); setcmd[1] = resp[1]; mtx_lock(&sc->sc_mutex); pmu_send(sc, PMU_POWER_EVENTS, 3, setcmd, 2, resp); mtx_unlock(&sc->sc_mutex); return (0); } static int pmu_query_battery(struct pmu_softc *sc, int batt, struct pmu_battstate *info) { uint8_t reg; uint8_t resp[16]; int len; reg = batt + 1; mtx_lock(&sc->sc_mutex); len = pmu_send(sc, PMU_SMART_BATTERY_STATE, 1, &reg, 16, resp); mtx_unlock(&sc->sc_mutex); if (len < 3) return (-1); /* All PMU battery info replies share a common header: * Byte 1 Payload Format * Byte 2 Battery Flags */ info->state = resp[2]; switch (resp[1]) { case 3: case 4: /* * Formats 3 and 4 appear to be the same: * Byte 3 Charge * Byte 4 Max Charge * Byte 5 Current * Byte 6 Voltage */ info->charge = resp[3]; info->maxcharge = resp[4]; /* Current can be positive or negative */ info->current = (int8_t)resp[5]; info->voltage = resp[6]; break; case 5: /* * Format 5 is a wider version of formats 3 and 4: * Byte 3-4 Charge * Byte 5-6 Max Charge * Byte 7-8 Current * Byte 9-10 Voltage */ info->charge = (resp[3] << 8) | resp[4]; info->maxcharge = (resp[5] << 8) | resp[6]; /* Current can be positive or negative */ info->current = (int16_t)((resp[7] << 8) | resp[8]); info->voltage = (resp[9] << 8) | resp[10]; break; default: device_printf(sc->sc_dev, "Unknown battery info format (%d)!\n", resp[1]); return (-1); } return (0); } static void pmu_battery_notify(struct pmu_battstate *batt, struct pmu_battstate *old) { char notify_buf[16]; int new_acline, old_acline; new_acline = (batt->state & PMU_PWR_AC_PRESENT) ? 1 : 0; old_acline = (old->state & PMU_PWR_AC_PRESENT) ? 1 : 0; if (new_acline != old_acline) { snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", new_acline); devctl_notify("PMU", "POWER", "ACLINE", notify_buf); } } static void pmu_battquery_proc(void) { struct pmu_softc *sc; struct pmu_battstate batt; struct pmu_battstate cur_batt; int error; sc = device_get_softc(pmu); bzero(&cur_batt, sizeof(cur_batt)); while (1) { kproc_suspend_check(curproc); error = pmu_query_battery(sc, 0, &batt); if (error == 0) { pmu_battery_notify(&batt, &cur_batt); cur_batt = batt; } pause("pmu_batt", hz); } } static int pmu_battmon(SYSCTL_HANDLER_ARGS) { int error, result; result = pmu_battmon_enabled; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (!result && pmu_battmon_enabled) error = kproc_suspend(pmubattproc, hz); else if (result && pmu_battmon_enabled == 0) error = kproc_resume(pmubattproc); pmu_battmon_enabled = (result != 0); return (error); } static int pmu_acline_state(SYSCTL_HANDLER_ARGS) { struct pmu_softc *sc; struct pmu_battstate batt; int error, result; sc = arg1; /* The PMU treats the AC line status as a property of the battery */ error = pmu_query_battery(sc, 0, &batt); if (error != 0) return (error); result = (batt.state & PMU_PWR_AC_PRESENT) ? 1 : 0; error = sysctl_handle_int(oidp, &result, 0, req); return (error); } static int pmu_battquery_sysctl(SYSCTL_HANDLER_ARGS) { struct pmu_softc *sc; struct pmu_battstate batt; int error, result; sc = arg1; error = pmu_query_battery(sc, arg2 & 0x00ff, &batt); if (error != 0) return (error); switch (arg2 & 0xff00) { case PMU_BATSYSCTL_PRESENT: result = (batt.state & PMU_PWR_BATT_PRESENT) ?
1 : 0; break; case PMU_BATSYSCTL_CHARGING: result = (batt.state & PMU_PWR_BATT_CHARGING) ? 1 : 0; break; case PMU_BATSYSCTL_CHARGE: result = batt.charge; break; case PMU_BATSYSCTL_MAXCHARGE: result = batt.maxcharge; break; case PMU_BATSYSCTL_CURRENT: result = batt.current; break; case PMU_BATSYSCTL_VOLTAGE: result = batt.voltage; break; case PMU_BATSYSCTL_TIME: /* Time remaining until full charge/discharge, in minutes */ if (batt.current >= 0) result = (batt.maxcharge - batt.charge) /* mAh */ * 60 / batt.current /* mA */; else result = (batt.charge /* mAh */ * 60) / (-batt.current /* mA */); break; case PMU_BATSYSCTL_LIFE: /* Battery charge fraction, in percent */ result = (batt.charge * 100) / batt.maxcharge; break; default: /* This should never happen */ result = -1; } error = sysctl_handle_int(oidp, &result, 0, req); return (error); } #define DIFF19041970 2082844800 static int pmu_gettime(device_t dev, struct timespec *ts) { struct pmu_softc *sc = device_get_softc(dev); uint8_t resp[16]; uint32_t sec; mtx_lock(&sc->sc_mutex); pmu_send(sc, PMU_READ_RTC, 0, NULL, 16, resp); mtx_unlock(&sc->sc_mutex); memcpy(&sec, &resp[1], 4); ts->tv_sec = sec - DIFF19041970; ts->tv_nsec = 0; return (0); } static int pmu_settime(device_t dev, struct timespec *ts) { struct pmu_softc *sc = device_get_softc(dev); uint32_t sec; sec = ts->tv_sec + DIFF19041970; mtx_lock(&sc->sc_mutex); pmu_send(sc, PMU_SET_RTC, sizeof(sec), (uint8_t *)&sec, 0, NULL); mtx_unlock(&sc->sc_mutex); return (0); } int pmu_set_speed(int low_speed) { struct pmu_softc *sc; uint8_t sleepcmd[] = {'W', 'O', 'O', 'F', 0}; uint8_t resp[16]; sc = device_get_softc(pmu); pmu_write_reg(sc, vIER, 0x10); spinlock_enter(); mtdec(0x7fffffff); mb(); mtdec(0x7fffffff); sleepcmd[4] = low_speed; pmu_send(sc, PMU_CPU_SPEED, 5, sleepcmd, 16, resp); unin_chip_sleep(NULL, 1); platform_sleep(); unin_chip_wake(NULL); mtdec(1); /* Force a decrementer exception */ spinlock_exit(); pmu_write_reg(sc, vIER, 0x90); return (0); }
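One detail shared by cuda_gettime()/cuda_settime() and pmu_gettime()/pmu_settime() above is the DIFF19041970 constant: the Apple RTCs count seconds from 1904-01-01, which precedes the Unix epoch by 2082844800 seconds (24107 days, i.e. 66 years including 17 leap days). A minimal userland sketch of the same conversion (a standalone illustration; the helper names are made up and not part of either driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DIFF19041970	2082844800UL	/* seconds from 1904-01-01 to 1970-01-01 */

/* Mirrors of the conversions done in the gettime/settime methods above. */
static uint32_t
mac_rtc_to_unix(uint32_t mac_sec)
{

	return (mac_sec - DIFF19041970);
}

static uint32_t
unix_to_mac_rtc(uint32_t unix_sec)
{

	return (unix_sec + DIFF19041970);
}

int
main(void)
{
	uint32_t mac_sec = 3786912000UL;	/* hypothetical RTC reading */

	/* Prints 1704067200, i.e. 2024-01-01T00:00:00Z. */
	printf("%" PRIu32 "\n", mac_rtc_to_unix(mac_sec));
	/* The round trip recovers the raw reading. */
	printf("%" PRIu32 "\n", unix_to_mac_rtc(mac_rtc_to_unix(mac_sec)));
	return (0);
}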